/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric.  This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * metrics.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution.  While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery.  For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size.  If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters
 * and history (e.g. the end sector of the last IO).  The cost is measured
 * in device time.  If a given IO is estimated to cost 10ms, the device
 * should be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear.  Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added.  While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough.  Default
 * parameters for several different classes of devices are provided and the
 * parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
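 *
 * As a rough sketch (illustrative pseudo-code, not the actual
 * implementation or its identifiers), the linear model boils down to:
 *
 *	cost = (seq ? seq_base_cost : rand_base_cost)
 *	       + nr_4k_pages * page_cost;
 *
 * e.g. with a random base cost worth 1ms and a page cost worth 10us of
 * device time, a random 64KB IO costs about 1ms + 16 * 10us = 1.16ms.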
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated.  Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *           root
 *         /       \
 *      A (w:100)  B (w:300)
 *       /  \
 *  A0 (w:100)  A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each.  The distribution mechanism only cares about these flattened
 * shares.  They're called hweights (hierarchical weights) and always add
 * up to 1 (WEIGHT_ONE).
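 *
 * In other words, an hweight is the product of each level's weight
 * fraction along the path from the root.  For the hierarchy above:
 *
 *	hweight(A0) = 100/(100+300) * 100/(100+100) = 0.25 * 0.5 = 12.5%
 *	hweight(B)  = 300/(100+300)                 = 75%
 *
 * and the active hweights always sum to WEIGHT_ONE.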
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
 * For example, with 12.5% weight, A0's vtime runs 8 times slower (100/12.5)
 * than the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution.  Each cgroup's
 * vtime is running at a rate determined by its hweight.  A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO if doing so
 * wouldn't outrun the current device vtime.  Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
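 *
 * Conceptually, the issue-side decision is a budget comparison.  A
 * simplified sketch (the real paths also handle debt, delay and
 * waitqueues):
 *
 *	if (cgroup_vtime + scaled_cost <= device_vnow)
 *		issue the bio and advance cgroup_vtime by scaled_cost;
 *	else
 *		wait until device_vnow has advanced far enough;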
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect.  There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection.  The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is.  If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down.  If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should be
 * issuing more.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock.  For example, if the vtime is running
 * at the vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
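 *
 * In vtime terms, the device clock nominally advances by VTIME_PER_USEC
 * per wall-clock microsecond.  At a 75% vrate it only advances by
 * 0.75 * VTIME_PER_USEC, so the total device time all cgroups combined
 * can consume per second shrinks by the same factor.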
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available.  When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate.  This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality.  For a better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if the N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service.  There is an inherent trade-off - the tighter the latency QoS,
 * the higher the bandwidth loss.  Latency QoS is disabled by default
 * and can be set through /sys/fs/cgroup/io.cost.qos.
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own.  Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity.  The naive
 * distribution of half and half would lead to 60% utilization of the
 * device (B is capped at its 50% share while A only uses 10% of its own),
 * a significant reduction in the total amount of work done compared to
 * free-for-all competition.  This is too high a cost to pay for IO
 * control.
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it.  In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
 *
 * Note that adjusting down surplus weights has the same effects as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically.  However, squaring who can
 * donate and should take back how much requires hweight propagations
 * anyway, making it easier to implement and understand as a separate
 * mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
 * https://github.com/osandov/drgn.  The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
 *
 *  - per	: Timer period
 *  - cur_per	: Internal wall and device vtime clock
 *  - vrate	: Device virtual time rate against wall clock
 *  - weight	: Surplus-adjusted and configured weights
 *  - hweight	: Surplus-adjusted and configured hierarchical weights
 *  - inflt	: The percentage of in-flight IO cost at the end of last period
 *  - del_ms	: Deferred issuer delay induction level and duration
 *  - usages	: Usage history
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <asm/local.h>
#include <asm/local64.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-cgroup.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */
	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD		= USEC_PER_MSEC,
	MAX_PERIOD		= USEC_PER_SEC,

	/*
	 * iocg->vtime is targeted at 50% behind the device vtime, which
	 * serves as its IO credit buffer.  Surplus weight adjustment is
	 * immediately canceled if the vtime margin runs below 10%.
	 */
	MARGIN_TARGET_PCT	= 50,

	INUSE_ADJ_STEP_PCT	= 25,

	/* Have some play in timer operations */

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	WEIGHT_ONE		= 1 << 16,
	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision.  For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy.  At the same time, it should be able to
	 * represent reasonably long enough durations to be useful and
	 * convenient during operation.
	 *
	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
	 */
	VTIME_PER_SEC_SHIFT	= 37,
	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
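
	/*
	 * Illustrative arithmetic only: VTIME_PER_NSEC works out to
	 * 2^37 / 10^9 ~= 137, so even a fraction of a nanosecond of device
	 * time is representable, while a 64bit vtime only wraps after about
	 * 2^63 / 2^37 = 2^26 seconds (~2 years) at the nominal rate and
	 * still after several days at a 100x vrate.
	 */
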
	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM		= 10000,	/* 1% */
	VRATE_MAX_PPM		= 100000000,	/* 10000% */

	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

	/* unbusy hysteresis */
	/*
	 * The effect of delay is indirect and non-linear and a huge amount of
	 * future debt can accumulate abruptly while unthrottled.  Linearly scale
	 * up delay as debt is going up and then let it decay exponentially.
	 * This gives us quick ramp ups while delay is accumulating and long
	 * tails which can help reduce the frequency of debt explosions on
	 * unthrottle.  The parameters are experimentally determined.
	 *
	 * The delay mechanism provides adequate protection and behavior in many
	 * cases.  However, this is far from ideal and falls short on both
	 * fronts.  The debtors are often throttled too harshly, costing a
	 * significant level of fairness and possibly total work, while the
	 * protection against their impacts on the system can be choppy and
	 * unreliable.
	 *
	 * The shortcoming primarily stems from the fact that, unlike for page
	 * cache, the kernel doesn't have a well-defined back-pressure propagation
	 * mechanism or policies for anonymous memory.  Fully addressing this
	 * issue will likely require substantial improvements in the area.
	 */
	MIN_DELAY_THR_PCT	= 500,
	MAX_DELAY_THR_PCT	= 25000,

	MAX_DELAY		= 250 * USEC_PER_MSEC,
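
	/*
	 * Illustrative only: with debt worth ten periods (vover_pct = 1000),
	 * the linear ramp used by iocg_kick_delay() lands at roughly
	 * MIN_DELAY + (MAX_DELAY - MIN_DELAY) * (1000 - 500) / (25000 - 500),
	 * i.e. about 2% of the way from the minimum towards the 250ms
	 * maximum delay.
	 */
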
	/* halve debts if avg usage over 100ms is under 50% */
	DFGV_PERIOD		= 100 * USEC_PER_MSEC,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS	= 10,

	/*
	 * Count IO size in 4k pages.  The 12bit shift helps keep the
	 * size-proportional components of the cost calculation within a
	 * similar number of digits as the per-IO cost components.
	 */
	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES	= 4096,
/* io.cost.qos controls including per-dev enable of the whole controller */

/* io.cost.qos params */

/* io.cost.model controls */

/* builtin linear cost model coefficients */
	u32				qos[NR_QOS_PARAMS];
	u64				i_lcoefs[NR_I_LCOEFS];
	u64				lcoefs[NR_LCOEFS];
	u32				too_fast_vrate_pct;
	u32				too_slow_vrate_pct;
struct ioc_pcpu_stat {
	struct ioc_missed		missed[2];

	local64_t			rq_wait_ns;
	struct ioc_params		params;
	struct ioc_margins		margins;

	struct timer_list		timer;
	struct list_head		active_iocgs;	/* active cgroups */
	struct ioc_pcpu_stat __percpu	*pcpu_stat;

	enum ioc_running		running;
	atomic64_t			vtime_rate;

	seqcount_spinlock_t		period_seqcount;
	u64				period_at;	/* wallclock starttime */
	u64				period_at_vtime; /* vtime starttime */

	atomic64_t			cur_period;	/* inc'd each period */
	int				busy_level;	/* saturation history */

	bool				weights_updated;
	atomic_t			hweight_gen;	/* for lazy hweights */

	/* debt forgiveness */
	u64				dfgv_usage_us_sum;

	u64				autop_too_fast_at;
	u64				autop_too_slow_at;

	bool				user_qos_params:1;
	bool				user_cost_model:1;
struct iocg_pcpu_stat {
	local64_t			abs_vusage;
/* per device-cgroup pair */
	struct blkg_policy_data		pd;

	/*
	 * An iocg can get its weight from two sources - an explicit
	 * per-device-cgroup configuration or the default weight of the
	 * cgroup.  `cfg_weight` is the explicit per-device-cgroup
	 * configuration.  `weight` is the effective considering both
	 * sources.
	 *
	 * When an idle cgroup becomes active its `active` goes from 0 to
	 * `weight`.  `inuse` is the surplus adjusted active weight.
	 * `active` and `inuse` are used to calculate `hweight_active` and
	 * `hweight_inuse`.
	 *
	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
	 * surplus adjustments.
	 *
	 * `inuse` may be adjusted dynamically during period.  `saved_*` are used
	 * to determine and track adjustments.
	 */

	sector_t			cursor;		/* to detect randio */

	/*
	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
	 * issued.  If lagging behind device vtime, the delta represents
	 * the currently available IO budget.  If running ahead, the
	 * overage.
	 *
	 * `vtime_done` is the same but progressed on completion rather
	 * than issue.  The delta behind `vtime` represents the cost of
	 * currently in-flight IOs.
	 */
	atomic64_t			done_vtime;

	/* current delay in effect and when it started */

	/*
	 * The period this iocg was last active in.  Used for deactivation
	 * and invalidating `vtime`.
	 */
	atomic64_t			active_period;
	struct list_head		active_list;

	/* see __propagate_weights() and current_hweight() for details */
	u64				child_active_sum;
	u64				child_adjusted_sum;

	u32				hweight_donating;
	u32				hweight_after_donation;

	struct list_head		walk_list;
	struct list_head		surplus_list;

	struct wait_queue_head		waitq;
	struct hrtimer			waitq_timer;

	/* timestamp at the latest activation */

	struct iocg_pcpu_stat __percpu	*pcpu_stat;
	struct iocg_stat		stat;
	struct iocg_stat		last_stat;
	u64				last_stat_abs_vusage;

	/* this iocg's depth in the hierarchy and ancestors including self */
	struct ioc_gq			*ancestors[];

	struct blkcg_policy_data	cpd;
	unsigned int			dfl_weight;

	struct wait_queue_entry		wait;

struct iocg_wake_ctx {
578 static const struct ioc_params autop
[] = {
581 [QOS_RLAT
] = 250000, /* 250ms */
583 [QOS_MIN
] = VRATE_MIN_PPM
,
584 [QOS_MAX
] = VRATE_MAX_PPM
,
587 [I_LCOEF_RBPS
] = 174019176,
588 [I_LCOEF_RSEQIOPS
] = 41708,
589 [I_LCOEF_RRANDIOPS
] = 370,
590 [I_LCOEF_WBPS
] = 178075866,
591 [I_LCOEF_WSEQIOPS
] = 42705,
592 [I_LCOEF_WRANDIOPS
] = 378,
597 [QOS_RLAT
] = 25000, /* 25ms */
599 [QOS_MIN
] = VRATE_MIN_PPM
,
600 [QOS_MAX
] = VRATE_MAX_PPM
,
603 [I_LCOEF_RBPS
] = 245855193,
604 [I_LCOEF_RSEQIOPS
] = 61575,
605 [I_LCOEF_RRANDIOPS
] = 6946,
606 [I_LCOEF_WBPS
] = 141365009,
607 [I_LCOEF_WSEQIOPS
] = 33716,
608 [I_LCOEF_WRANDIOPS
] = 26796,
613 [QOS_RLAT
] = 25000, /* 25ms */
615 [QOS_MIN
] = VRATE_MIN_PPM
,
616 [QOS_MAX
] = VRATE_MAX_PPM
,
619 [I_LCOEF_RBPS
] = 488636629,
620 [I_LCOEF_RSEQIOPS
] = 8932,
621 [I_LCOEF_RRANDIOPS
] = 8518,
622 [I_LCOEF_WBPS
] = 427891549,
623 [I_LCOEF_WSEQIOPS
] = 28755,
624 [I_LCOEF_WRANDIOPS
] = 21940,
626 .too_fast_vrate_pct
= 500,
630 [QOS_RLAT
] = 5000, /* 5ms */
632 [QOS_MIN
] = VRATE_MIN_PPM
,
633 [QOS_MAX
] = VRATE_MAX_PPM
,
636 [I_LCOEF_RBPS
] = 3102524156LLU,
637 [I_LCOEF_RSEQIOPS
] = 724816,
638 [I_LCOEF_RRANDIOPS
] = 778122,
639 [I_LCOEF_WBPS
] = 1742780862LLU,
640 [I_LCOEF_WSEQIOPS
] = 425702,
641 [I_LCOEF_WRANDIOPS
] = 443193,
643 .too_slow_vrate_pct
= 10,
/*
 * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
 * vtime credit shortage and down on device saturation.
 */
static const u32 vrate_adj_pct[] =
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
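
/*
 * For example (illustrative): at busy_level -3, ioc_adjust_base_vrate()
 * looks up vrate_adj_pct[3] and speeds the base vrate up by that many
 * percent for the next period, while at busy_level +3 it slows down by
 * the same percentage, always clamped to [vrate_min, vrate_max].
 */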

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
660 static struct ioc
*rqos_to_ioc(struct rq_qos
*rqos
)
662 return container_of(rqos
, struct ioc
, rqos
);
665 static struct ioc
*q_to_ioc(struct request_queue
*q
)
667 return rqos_to_ioc(rq_qos_id(q
, RQ_QOS_COST
));
670 static const char __maybe_unused
*ioc_name(struct ioc
*ioc
)
672 struct gendisk
*disk
= ioc
->rqos
.disk
;
676 return disk
->disk_name
;
679 static struct ioc_gq
*pd_to_iocg(struct blkg_policy_data
*pd
)
681 return pd
? container_of(pd
, struct ioc_gq
, pd
) : NULL
;
684 static struct ioc_gq
*blkg_to_iocg(struct blkcg_gq
*blkg
)
686 return pd_to_iocg(blkg_to_pd(blkg
, &blkcg_policy_iocost
));
689 static struct blkcg_gq
*iocg_to_blkg(struct ioc_gq
*iocg
)
691 return pd_to_blkg(&iocg
->pd
);
694 static struct ioc_cgrp
*blkcg_to_iocc(struct blkcg
*blkcg
)
696 return container_of(blkcg_to_cpd(blkcg
, &blkcg_policy_iocost
),
697 struct ioc_cgrp
, cpd
);
/*
 * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
 * weight, the more expensive each IO.  Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
}

/*
 * The inverse of abs_cost_to_cost().  Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
}
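
/*
 * Illustrative numbers: with hw_inuse at half of WEIGHT_ONE (a 50%
 * hierarchical share), abs_cost_to_cost() doubles the charge - an IO with
 * an absolute cost of 10ms worth of device time consumes 20ms of that
 * cgroup's vtime budget, which is what makes a low-share cgroup's vtime
 * run correspondingly slower.
 */
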
717 static void iocg_commit_bio(struct ioc_gq
*iocg
, struct bio
*bio
,
718 u64 abs_cost
, u64 cost
)
720 struct iocg_pcpu_stat
*gcs
;
722 bio
->bi_iocost_cost
= cost
;
723 atomic64_add(cost
, &iocg
->vtime
);
725 gcs
= get_cpu_ptr(iocg
->pcpu_stat
);
726 local64_add(abs_cost
, &gcs
->abs_vusage
);
730 static void iocg_lock(struct ioc_gq
*iocg
, bool lock_ioc
, unsigned long *flags
)
733 spin_lock_irqsave(&iocg
->ioc
->lock
, *flags
);
734 spin_lock(&iocg
->waitq
.lock
);
736 spin_lock_irqsave(&iocg
->waitq
.lock
, *flags
);
740 static void iocg_unlock(struct ioc_gq
*iocg
, bool unlock_ioc
, unsigned long *flags
)
743 spin_unlock(&iocg
->waitq
.lock
);
744 spin_unlock_irqrestore(&iocg
->ioc
->lock
, *flags
);
746 spin_unlock_irqrestore(&iocg
->waitq
.lock
, *flags
);
750 #define CREATE_TRACE_POINTS
751 #include <trace/events/iocost.h>
753 static void ioc_refresh_margins(struct ioc
*ioc
)
755 struct ioc_margins
*margins
= &ioc
->margins
;
756 u32 period_us
= ioc
->period_us
;
757 u64 vrate
= ioc
->vtime_base_rate
;
759 margins
->min
= (period_us
* MARGIN_MIN_PCT
/ 100) * vrate
;
760 margins
->low
= (period_us
* MARGIN_LOW_PCT
/ 100) * vrate
;
761 margins
->target
= (period_us
* MARGIN_TARGET_PCT
/ 100) * vrate
;
764 /* latency Qos params changed, update period_us and all the dependent params */
765 static void ioc_refresh_period_us(struct ioc
*ioc
)
767 u32 ppm
, lat
, multi
, period_us
;
769 lockdep_assert_held(&ioc
->lock
);
771 /* pick the higher latency target */
772 if (ioc
->params
.qos
[QOS_RLAT
] >= ioc
->params
.qos
[QOS_WLAT
]) {
773 ppm
= ioc
->params
.qos
[QOS_RPPM
];
774 lat
= ioc
->params
.qos
[QOS_RLAT
];
776 ppm
= ioc
->params
.qos
[QOS_WPPM
];
777 lat
= ioc
->params
.qos
[QOS_WLAT
];
	/*
	 * We want the period to be long enough to contain a healthy number
	 * of IOs while short enough for granular control.  Define it as a
	 * multiple of the latency target.  Ideally, the multiplier should
	 * be scaled according to the percentile so that it would nominally
	 * contain a certain number of requests.  Let's be simpler and
	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
	 */
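	/*
	 * e.g. (illustrative) a 95th percentile target (ppm = 950000) with a
	 * 25ms latency target yields multi = max((1000000 - 950000) / 50000, 2)
	 * = 2 and thus a 50ms period, while a 50th percentile target yields
	 * multi = 10 and a 250ms period, subject to the MIN_PERIOD/MAX_PERIOD
	 * clamping below.
	 */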
789 multi
= max_t(u32
, (MILLION
- ppm
) / 50000, 2);
792 period_us
= multi
* lat
;
793 period_us
= clamp_t(u32
, period_us
, MIN_PERIOD
, MAX_PERIOD
);
795 /* calculate dependent params */
796 ioc
->period_us
= period_us
;
797 ioc
->timer_slack_ns
= div64_u64(
798 (u64
)period_us
* NSEC_PER_USEC
* TIMER_SLACK_PCT
,
800 ioc_refresh_margins(ioc
);
804 * ioc->rqos.disk isn't initialized when this function is called from
807 static int ioc_autop_idx(struct ioc
*ioc
, struct gendisk
*disk
)
809 int idx
= ioc
->autop_idx
;
810 const struct ioc_params
*p
= &autop
[idx
];
815 if (!blk_queue_nonrot(disk
->queue
))
818 /* handle SATA SSDs w/ broken NCQ */
819 if (blk_queue_depth(disk
->queue
) == 1)
820 return AUTOP_SSD_QD1
;
822 /* use one of the normal ssd sets */
823 if (idx
< AUTOP_SSD_DFL
)
824 return AUTOP_SSD_DFL
;
826 /* if user is overriding anything, maintain what was there */
827 if (ioc
->user_qos_params
|| ioc
->user_cost_model
)
830 /* step up/down based on the vrate */
831 vrate_pct
= div64_u64(ioc
->vtime_base_rate
* 100, VTIME_PER_USEC
);
832 now_ns
= blk_time_get_ns();
834 if (p
->too_fast_vrate_pct
&& p
->too_fast_vrate_pct
<= vrate_pct
) {
835 if (!ioc
->autop_too_fast_at
)
836 ioc
->autop_too_fast_at
= now_ns
;
837 if (now_ns
- ioc
->autop_too_fast_at
>= AUTOP_CYCLE_NSEC
)
840 ioc
->autop_too_fast_at
= 0;
843 if (p
->too_slow_vrate_pct
&& p
->too_slow_vrate_pct
>= vrate_pct
) {
844 if (!ioc
->autop_too_slow_at
)
845 ioc
->autop_too_slow_at
= now_ns
;
846 if (now_ns
- ioc
->autop_too_slow_at
>= AUTOP_CYCLE_NSEC
)
849 ioc
->autop_too_slow_at
= 0;
/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
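
/*
 * Worked example (illustrative): for a device doing 400MB/s sequentially
 * and 100k random 4k iops, *@page = 1s / (400MB/s / 4096) ~= 10.2us worth
 * of device time per page and *@randio ~= max(1s / 100000 - 10.2us, 0) ~= 0,
 * i.e. on such a device the size-proportional component dominates and the
 * per-IO component nearly vanishes.
 */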
868 static void calc_lcoefs(u64 bps
, u64 seqiops
, u64 randiops
,
869 u64
*page
, u64
*seqio
, u64
*randio
)
873 *page
= *seqio
= *randio
= 0;
876 u64 bps_pages
= DIV_ROUND_UP_ULL(bps
, IOC_PAGE_SIZE
);
879 *page
= DIV64_U64_ROUND_UP(VTIME_PER_SEC
, bps_pages
);
885 v
= DIV64_U64_ROUND_UP(VTIME_PER_SEC
, seqiops
);
891 v
= DIV64_U64_ROUND_UP(VTIME_PER_SEC
, randiops
);
897 static void ioc_refresh_lcoefs(struct ioc
*ioc
)
899 u64
*u
= ioc
->params
.i_lcoefs
;
900 u64
*c
= ioc
->params
.lcoefs
;
902 calc_lcoefs(u
[I_LCOEF_RBPS
], u
[I_LCOEF_RSEQIOPS
], u
[I_LCOEF_RRANDIOPS
],
903 &c
[LCOEF_RPAGE
], &c
[LCOEF_RSEQIO
], &c
[LCOEF_RRANDIO
]);
904 calc_lcoefs(u
[I_LCOEF_WBPS
], u
[I_LCOEF_WSEQIOPS
], u
[I_LCOEF_WRANDIOPS
],
905 &c
[LCOEF_WPAGE
], &c
[LCOEF_WSEQIO
], &c
[LCOEF_WRANDIO
]);
909 * struct gendisk is required as an argument because ioc->rqos.disk
910 * is not properly initialized when called from the init path.
912 static bool ioc_refresh_params_disk(struct ioc
*ioc
, bool force
,
913 struct gendisk
*disk
)
915 const struct ioc_params
*p
;
918 lockdep_assert_held(&ioc
->lock
);
920 idx
= ioc_autop_idx(ioc
, disk
);
923 if (idx
== ioc
->autop_idx
&& !force
)
926 if (idx
!= ioc
->autop_idx
) {
927 atomic64_set(&ioc
->vtime_rate
, VTIME_PER_USEC
);
928 ioc
->vtime_base_rate
= VTIME_PER_USEC
;
931 ioc
->autop_idx
= idx
;
932 ioc
->autop_too_fast_at
= 0;
933 ioc
->autop_too_slow_at
= 0;
935 if (!ioc
->user_qos_params
)
936 memcpy(ioc
->params
.qos
, p
->qos
, sizeof(p
->qos
));
937 if (!ioc
->user_cost_model
)
938 memcpy(ioc
->params
.i_lcoefs
, p
->i_lcoefs
, sizeof(p
->i_lcoefs
));
940 ioc_refresh_period_us(ioc
);
941 ioc_refresh_lcoefs(ioc
);
943 ioc
->vrate_min
= DIV64_U64_ROUND_UP((u64
)ioc
->params
.qos
[QOS_MIN
] *
944 VTIME_PER_USEC
, MILLION
);
945 ioc
->vrate_max
= DIV64_U64_ROUND_UP((u64
)ioc
->params
.qos
[QOS_MAX
] *
946 VTIME_PER_USEC
, MILLION
);
951 static bool ioc_refresh_params(struct ioc
*ioc
, bool force
)
953 return ioc_refresh_params_disk(ioc
, force
, ioc
->rqos
.disk
);
957 * When an iocg accumulates too much vtime or gets deactivated, we throw away
958 * some vtime, which lowers the overall device utilization. As the exact amount
959 * which is being thrown away is known, we can compensate by accelerating the
960 * vrate accordingly so that the extra vtime generated in the current period
961 * matches what got lost.
963 static void ioc_refresh_vrate(struct ioc
*ioc
, struct ioc_now
*now
)
965 s64 pleft
= ioc
->period_at
+ ioc
->period_us
- now
->now
;
966 s64 vperiod
= ioc
->period_us
* ioc
->vtime_base_rate
;
967 s64 vcomp
, vcomp_min
, vcomp_max
;
969 lockdep_assert_held(&ioc
->lock
);
971 /* we need some time left in this period */
976 * Calculate how much vrate should be adjusted to offset the error.
977 * Limit the amount of adjustment and deduct the adjusted amount from
980 vcomp
= -div64_s64(ioc
->vtime_err
, pleft
);
981 vcomp_min
= -(ioc
->vtime_base_rate
>> 1);
982 vcomp_max
= ioc
->vtime_base_rate
;
983 vcomp
= clamp(vcomp
, vcomp_min
, vcomp_max
);
985 ioc
->vtime_err
+= vcomp
* pleft
;
987 atomic64_set(&ioc
->vtime_rate
, ioc
->vtime_base_rate
+ vcomp
);
989 /* bound how much error can accumulate */
990 ioc
->vtime_err
= clamp(ioc
->vtime_err
, -vperiod
, vperiod
);
993 static void ioc_adjust_base_vrate(struct ioc
*ioc
, u32 rq_wait_pct
,
994 int nr_lagging
, int nr_shortages
,
995 int prev_busy_level
, u32
*missed_ppm
)
997 u64 vrate
= ioc
->vtime_base_rate
;
998 u64 vrate_min
= ioc
->vrate_min
, vrate_max
= ioc
->vrate_max
;
1000 if (!ioc
->busy_level
|| (ioc
->busy_level
< 0 && nr_lagging
)) {
1001 if (ioc
->busy_level
!= prev_busy_level
|| nr_lagging
)
1002 trace_iocost_ioc_vrate_adj(ioc
, vrate
,
1003 missed_ppm
, rq_wait_pct
,
1004 nr_lagging
, nr_shortages
);
1010 * If vrate is out of bounds, apply clamp gradually as the
1011 * bounds can change abruptly. Otherwise, apply busy_level
1014 if (vrate
< vrate_min
) {
1015 vrate
= div64_u64(vrate
* (100 + VRATE_CLAMP_ADJ_PCT
), 100);
1016 vrate
= min(vrate
, vrate_min
);
1017 } else if (vrate
> vrate_max
) {
1018 vrate
= div64_u64(vrate
* (100 - VRATE_CLAMP_ADJ_PCT
), 100);
1019 vrate
= max(vrate
, vrate_max
);
1021 int idx
= min_t(int, abs(ioc
->busy_level
),
1022 ARRAY_SIZE(vrate_adj_pct
) - 1);
1023 u32 adj_pct
= vrate_adj_pct
[idx
];
1025 if (ioc
->busy_level
> 0)
1026 adj_pct
= 100 - adj_pct
;
1028 adj_pct
= 100 + adj_pct
;
1030 vrate
= clamp(DIV64_U64_ROUND_UP(vrate
* adj_pct
, 100),
1031 vrate_min
, vrate_max
);
1034 trace_iocost_ioc_vrate_adj(ioc
, vrate
, missed_ppm
, rq_wait_pct
,
1035 nr_lagging
, nr_shortages
);
1037 ioc
->vtime_base_rate
= vrate
;
1038 ioc_refresh_margins(ioc
);
1041 /* take a snapshot of the current [v]time and vrate */
1042 static void ioc_now(struct ioc
*ioc
, struct ioc_now
*now
)
1047 now
->now_ns
= blk_time_get_ns();
1048 now
->now
= ktime_to_us(now
->now_ns
);
1049 vrate
= atomic64_read(&ioc
->vtime_rate
);
1052 * The current vtime is
1054 * vtime at period start + (wallclock time since the start) * vrate
1056 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1057 * needed, they're seqcount protected.
1060 seq
= read_seqcount_begin(&ioc
->period_seqcount
);
1061 now
->vnow
= ioc
->period_at_vtime
+
1062 (now
->now
- ioc
->period_at
) * vrate
;
1063 } while (read_seqcount_retry(&ioc
->period_seqcount
, seq
));
1066 static void ioc_start_period(struct ioc
*ioc
, struct ioc_now
*now
)
1068 WARN_ON_ONCE(ioc
->running
!= IOC_RUNNING
);
1070 write_seqcount_begin(&ioc
->period_seqcount
);
1071 ioc
->period_at
= now
->now
;
1072 ioc
->period_at_vtime
= now
->vnow
;
1073 write_seqcount_end(&ioc
->period_seqcount
);
1075 ioc
->timer
.expires
= jiffies
+ usecs_to_jiffies(ioc
->period_us
);
1076 add_timer(&ioc
->timer
);
1080 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1081 * weight sums and propagate upwards accordingly. If @save, the current margin
1082 * is saved to be used as reference for later inuse in-period adjustments.
1084 static void __propagate_weights(struct ioc_gq
*iocg
, u32 active
, u32 inuse
,
1085 bool save
, struct ioc_now
*now
)
1087 struct ioc
*ioc
= iocg
->ioc
;
1090 lockdep_assert_held(&ioc
->lock
);
1093 * For an active leaf node, its inuse shouldn't be zero or exceed
1094 * @active. An active internal node's inuse is solely determined by the
1095 * inuse to active ratio of its children regardless of @inuse.
1097 if (list_empty(&iocg
->active_list
) && iocg
->child_active_sum
) {
1098 inuse
= DIV64_U64_ROUND_UP(active
* iocg
->child_inuse_sum
,
1099 iocg
->child_active_sum
);
1101 inuse
= clamp_t(u32
, inuse
, 1, active
);
1104 iocg
->last_inuse
= iocg
->inuse
;
1106 iocg
->saved_margin
= now
->vnow
- atomic64_read(&iocg
->vtime
);
1108 if (active
== iocg
->active
&& inuse
== iocg
->inuse
)
1111 for (lvl
= iocg
->level
- 1; lvl
>= 0; lvl
--) {
1112 struct ioc_gq
*parent
= iocg
->ancestors
[lvl
];
1113 struct ioc_gq
*child
= iocg
->ancestors
[lvl
+ 1];
1114 u32 parent_active
= 0, parent_inuse
= 0;
1116 /* update the level sums */
1117 parent
->child_active_sum
+= (s32
)(active
- child
->active
);
1118 parent
->child_inuse_sum
+= (s32
)(inuse
- child
->inuse
);
1119 /* apply the updates */
1120 child
->active
= active
;
1121 child
->inuse
= inuse
;
1124 * The delta between inuse and active sums indicates that
1125 * much of weight is being given away. Parent's inuse
1126 * and active should reflect the ratio.
1128 if (parent
->child_active_sum
) {
1129 parent_active
= parent
->weight
;
1130 parent_inuse
= DIV64_U64_ROUND_UP(
1131 parent_active
* parent
->child_inuse_sum
,
1132 parent
->child_active_sum
);
1135 /* do we need to keep walking up? */
1136 if (parent_active
== parent
->active
&&
1137 parent_inuse
== parent
->inuse
)
1140 active
= parent_active
;
1141 inuse
= parent_inuse
;
1144 ioc
->weights_updated
= true;
1147 static void commit_weights(struct ioc
*ioc
)
1149 lockdep_assert_held(&ioc
->lock
);
1151 if (ioc
->weights_updated
) {
1152 /* paired with rmb in current_hweight(), see there */
1154 atomic_inc(&ioc
->hweight_gen
);
1155 ioc
->weights_updated
= false;
1159 static void propagate_weights(struct ioc_gq
*iocg
, u32 active
, u32 inuse
,
1160 bool save
, struct ioc_now
*now
)
1162 __propagate_weights(iocg
, active
, inuse
, save
, now
);
1163 commit_weights(iocg
->ioc
);
1166 static void current_hweight(struct ioc_gq
*iocg
, u32
*hw_activep
, u32
*hw_inusep
)
1168 struct ioc
*ioc
= iocg
->ioc
;
1173 /* hot path - if uptodate, use cached */
1174 ioc_gen
= atomic_read(&ioc
->hweight_gen
);
1175 if (ioc_gen
== iocg
->hweight_gen
)
1179 * Paired with wmb in commit_weights(). If we saw the updated
1180 * hweight_gen, all the weight updates from __propagate_weights() are
1183 * We can race with weight updates during calculation and get it
1184 * wrong. However, hweight_gen would have changed and a future
1185 * reader will recalculate and we're guaranteed to discard the
1186 * wrong result soon.
1190 hwa
= hwi
= WEIGHT_ONE
;
1191 for (lvl
= 0; lvl
<= iocg
->level
- 1; lvl
++) {
1192 struct ioc_gq
*parent
= iocg
->ancestors
[lvl
];
1193 struct ioc_gq
*child
= iocg
->ancestors
[lvl
+ 1];
1194 u64 active_sum
= READ_ONCE(parent
->child_active_sum
);
1195 u64 inuse_sum
= READ_ONCE(parent
->child_inuse_sum
);
1196 u32 active
= READ_ONCE(child
->active
);
1197 u32 inuse
= READ_ONCE(child
->inuse
);
1199 /* we can race with deactivations and either may read as zero */
1200 if (!active_sum
|| !inuse_sum
)
1203 active_sum
= max_t(u64
, active
, active_sum
);
1204 hwa
= div64_u64((u64
)hwa
* active
, active_sum
);
1206 inuse_sum
= max_t(u64
, inuse
, inuse_sum
);
1207 hwi
= div64_u64((u64
)hwi
* inuse
, inuse_sum
);
1210 iocg
->hweight_active
= max_t(u32
, hwa
, 1);
1211 iocg
->hweight_inuse
= max_t(u32
, hwi
, 1);
1212 iocg
->hweight_gen
= ioc_gen
;
1215 *hw_activep
= iocg
->hweight_active
;
1217 *hw_inusep
= iocg
->hweight_inuse
;
1221 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1222 * other weights stay unchanged.
1224 static u32
current_hweight_max(struct ioc_gq
*iocg
)
1226 u32 hwm
= WEIGHT_ONE
;
1227 u32 inuse
= iocg
->active
;
1228 u64 child_inuse_sum
;
1231 lockdep_assert_held(&iocg
->ioc
->lock
);
1233 for (lvl
= iocg
->level
- 1; lvl
>= 0; lvl
--) {
1234 struct ioc_gq
*parent
= iocg
->ancestors
[lvl
];
1235 struct ioc_gq
*child
= iocg
->ancestors
[lvl
+ 1];
1237 child_inuse_sum
= parent
->child_inuse_sum
+ inuse
- child
->inuse
;
1238 hwm
= div64_u64((u64
)hwm
* inuse
, child_inuse_sum
);
1239 inuse
= DIV64_U64_ROUND_UP(parent
->active
* child_inuse_sum
,
1240 parent
->child_active_sum
);
1243 return max_t(u32
, hwm
, 1);
1246 static void weight_updated(struct ioc_gq
*iocg
, struct ioc_now
*now
)
1248 struct ioc
*ioc
= iocg
->ioc
;
1249 struct blkcg_gq
*blkg
= iocg_to_blkg(iocg
);
1250 struct ioc_cgrp
*iocc
= blkcg_to_iocc(blkg
->blkcg
);
1253 lockdep_assert_held(&ioc
->lock
);
1255 weight
= iocg
->cfg_weight
?: iocc
->dfl_weight
;
1256 if (weight
!= iocg
->weight
&& iocg
->active
)
1257 propagate_weights(iocg
, weight
, iocg
->inuse
, true, now
);
1258 iocg
->weight
= weight
;
1261 static bool iocg_activate(struct ioc_gq
*iocg
, struct ioc_now
*now
)
1263 struct ioc
*ioc
= iocg
->ioc
;
1264 u64 __maybe_unused last_period
, cur_period
;
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active.  We don't mind occasional races.
1272 if (!list_empty(&iocg
->active_list
)) {
1274 cur_period
= atomic64_read(&ioc
->cur_period
);
1275 if (atomic64_read(&iocg
->active_period
) != cur_period
)
1276 atomic64_set(&iocg
->active_period
, cur_period
);
1280 /* racy check on internal node IOs, treat as root level IOs */
1281 if (iocg
->child_active_sum
)
1284 spin_lock_irq(&ioc
->lock
);
1289 cur_period
= atomic64_read(&ioc
->cur_period
);
1290 last_period
= atomic64_read(&iocg
->active_period
);
1291 atomic64_set(&iocg
->active_period
, cur_period
);
1293 /* already activated or breaking leaf-only constraint? */
1294 if (!list_empty(&iocg
->active_list
))
1295 goto succeed_unlock
;
1296 for (i
= iocg
->level
- 1; i
> 0; i
--)
1297 if (!list_empty(&iocg
->ancestors
[i
]->active_list
))
1300 if (iocg
->child_active_sum
)
1304 * Always start with the target budget. On deactivation, we throw away
1305 * anything above it.
1307 vtarget
= now
->vnow
- ioc
->margins
.target
;
1308 vtime
= atomic64_read(&iocg
->vtime
);
1310 atomic64_add(vtarget
- vtime
, &iocg
->vtime
);
1311 atomic64_add(vtarget
- vtime
, &iocg
->done_vtime
);
1315 * Activate, propagate weight and start period timer if not
1316 * running. Reset hweight_gen to avoid accidental match from
1319 iocg
->hweight_gen
= atomic_read(&ioc
->hweight_gen
) - 1;
1320 list_add(&iocg
->active_list
, &ioc
->active_iocgs
);
1322 propagate_weights(iocg
, iocg
->weight
,
1323 iocg
->last_inuse
?: iocg
->weight
, true, now
);
1325 TRACE_IOCG_PATH(iocg_activate
, iocg
, now
,
1326 last_period
, cur_period
, vtime
);
1328 iocg
->activated_at
= now
->now
;
1330 if (ioc
->running
== IOC_IDLE
) {
1331 ioc
->running
= IOC_RUNNING
;
1332 ioc
->dfgv_period_at
= now
->now
;
1333 ioc
->dfgv_period_rem
= 0;
1334 ioc_start_period(ioc
, now
);
1338 spin_unlock_irq(&ioc
->lock
);
1342 spin_unlock_irq(&ioc
->lock
);
1346 static bool iocg_kick_delay(struct ioc_gq
*iocg
, struct ioc_now
*now
)
1348 struct ioc
*ioc
= iocg
->ioc
;
1349 struct blkcg_gq
*blkg
= iocg_to_blkg(iocg
);
1350 u64 tdelta
, delay
, new_delay
, shift
;
1351 s64 vover
, vover_pct
;
1354 lockdep_assert_held(&iocg
->waitq
.lock
);
1357 * If the delay is set by another CPU, we may be in the past. No need to
1358 * change anything if so. This avoids decay calculation underflow.
1360 if (time_before64(now
->now
, iocg
->delay_at
))
1363 /* calculate the current delay in effect - 1/2 every second */
1364 tdelta
= now
->now
- iocg
->delay_at
;
1365 shift
= div64_u64(tdelta
, USEC_PER_SEC
);
1366 if (iocg
->delay
&& shift
< BITS_PER_LONG
)
1367 delay
= iocg
->delay
>> shift
;
1371 /* calculate the new delay from the debt amount */
1372 current_hweight(iocg
, &hwa
, NULL
);
1373 vover
= atomic64_read(&iocg
->vtime
) +
1374 abs_cost_to_cost(iocg
->abs_vdebt
, hwa
) - now
->vnow
;
1375 vover_pct
= div64_s64(100 * vover
,
1376 ioc
->period_us
* ioc
->vtime_base_rate
);
1378 if (vover_pct
<= MIN_DELAY_THR_PCT
)
1380 else if (vover_pct
>= MAX_DELAY_THR_PCT
)
1381 new_delay
= MAX_DELAY
;
1383 new_delay
= MIN_DELAY
+
1384 div_u64((MAX_DELAY
- MIN_DELAY
) *
1385 (vover_pct
- MIN_DELAY_THR_PCT
),
1386 MAX_DELAY_THR_PCT
- MIN_DELAY_THR_PCT
);
1388 /* pick the higher one and apply */
1389 if (new_delay
> delay
) {
1390 iocg
->delay
= new_delay
;
1391 iocg
->delay_at
= now
->now
;
1395 if (delay
>= MIN_DELAY
) {
1396 if (!iocg
->indelay_since
)
1397 iocg
->indelay_since
= now
->now
;
1398 blkcg_set_delay(blkg
, delay
* NSEC_PER_USEC
);
1401 if (iocg
->indelay_since
) {
1402 iocg
->stat
.indelay_us
+= now
->now
- iocg
->indelay_since
;
1403 iocg
->indelay_since
= 0;
1406 blkcg_clear_delay(blkg
);
1411 static void iocg_incur_debt(struct ioc_gq
*iocg
, u64 abs_cost
,
1412 struct ioc_now
*now
)
1414 struct iocg_pcpu_stat
*gcs
;
1416 lockdep_assert_held(&iocg
->ioc
->lock
);
1417 lockdep_assert_held(&iocg
->waitq
.lock
);
1418 WARN_ON_ONCE(list_empty(&iocg
->active_list
));
	 * Once in debt, debt handling owns inuse.  @iocg stays at the minimum
	 * inuse, donating all of its share to others until its debt is paid off.
1424 if (!iocg
->abs_vdebt
&& abs_cost
) {
1425 iocg
->indebt_since
= now
->now
;
1426 propagate_weights(iocg
, iocg
->active
, 0, false, now
);
1429 iocg
->abs_vdebt
+= abs_cost
;
1431 gcs
= get_cpu_ptr(iocg
->pcpu_stat
);
1432 local64_add(abs_cost
, &gcs
->abs_vusage
);
1436 static void iocg_pay_debt(struct ioc_gq
*iocg
, u64 abs_vpay
,
1437 struct ioc_now
*now
)
1439 lockdep_assert_held(&iocg
->ioc
->lock
);
1440 lockdep_assert_held(&iocg
->waitq
.lock
);
1443 * make sure that nobody messed with @iocg. Check iocg->pd.online
1444 * to avoid warn when removing blkcg or disk.
1446 WARN_ON_ONCE(list_empty(&iocg
->active_list
) && iocg
->pd
.online
);
1447 WARN_ON_ONCE(iocg
->inuse
> 1);
1449 iocg
->abs_vdebt
-= min(abs_vpay
, iocg
->abs_vdebt
);
1451 /* if debt is paid in full, restore inuse */
1452 if (!iocg
->abs_vdebt
) {
1453 iocg
->stat
.indebt_us
+= now
->now
- iocg
->indebt_since
;
1454 iocg
->indebt_since
= 0;
1456 propagate_weights(iocg
, iocg
->active
, iocg
->last_inuse
,
1461 static int iocg_wake_fn(struct wait_queue_entry
*wq_entry
, unsigned mode
,
1462 int flags
, void *key
)
1464 struct iocg_wait
*wait
= container_of(wq_entry
, struct iocg_wait
, wait
);
1465 struct iocg_wake_ctx
*ctx
= key
;
1466 u64 cost
= abs_cost_to_cost(wait
->abs_cost
, ctx
->hw_inuse
);
1468 ctx
->vbudget
-= cost
;
1470 if (ctx
->vbudget
< 0)
1473 iocg_commit_bio(ctx
->iocg
, wait
->bio
, wait
->abs_cost
, cost
);
1474 wait
->committed
= true;
1477 * autoremove_wake_function() removes the wait entry only when it
1478 * actually changed the task state. We want the wait always removed.
1479 * Remove explicitly and use default_wake_function(). Note that the
1480 * order of operations is important as finish_wait() tests whether
1481 * @wq_entry is removed without grabbing the lock.
1483 default_wake_function(wq_entry
, mode
, flags
, key
);
1484 list_del_init_careful(&wq_entry
->entry
);
1489 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1490 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1491 * addition to iocg->waitq.lock.
1493 static void iocg_kick_waitq(struct ioc_gq
*iocg
, bool pay_debt
,
1494 struct ioc_now
*now
)
1496 struct ioc
*ioc
= iocg
->ioc
;
1497 struct iocg_wake_ctx ctx
= { .iocg
= iocg
};
1498 u64 vshortage
, expires
, oexpires
;
1502 lockdep_assert_held(&iocg
->waitq
.lock
);
1504 current_hweight(iocg
, &hwa
, NULL
);
1505 vbudget
= now
->vnow
- atomic64_read(&iocg
->vtime
);
1508 if (pay_debt
&& iocg
->abs_vdebt
&& vbudget
> 0) {
1509 u64 abs_vbudget
= cost_to_abs_cost(vbudget
, hwa
);
1510 u64 abs_vpay
= min_t(u64
, abs_vbudget
, iocg
->abs_vdebt
);
1511 u64 vpay
= abs_cost_to_cost(abs_vpay
, hwa
);
1513 lockdep_assert_held(&ioc
->lock
);
1515 atomic64_add(vpay
, &iocg
->vtime
);
1516 atomic64_add(vpay
, &iocg
->done_vtime
);
1517 iocg_pay_debt(iocg
, abs_vpay
, now
);
1521 if (iocg
->abs_vdebt
|| iocg
->delay
)
1522 iocg_kick_delay(iocg
, now
);
1525 * Debt can still be outstanding if we haven't paid all yet or the
1526 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1527 * under debt. Make sure @vbudget reflects the outstanding amount and is
1530 if (iocg
->abs_vdebt
) {
1531 s64 vdebt
= abs_cost_to_cost(iocg
->abs_vdebt
, hwa
);
1532 vbudget
= min_t(s64
, 0, vbudget
- vdebt
);
1536 * Wake up the ones which are due and see how much vtime we'll need for
1537 * the next one. As paying off debt restores hw_inuse, it must be read
1538 * after the above debt payment.
1540 ctx
.vbudget
= vbudget
;
1541 current_hweight(iocg
, NULL
, &ctx
.hw_inuse
);
1543 __wake_up_locked_key(&iocg
->waitq
, TASK_NORMAL
, &ctx
);
1545 if (!waitqueue_active(&iocg
->waitq
)) {
1546 if (iocg
->wait_since
) {
1547 iocg
->stat
.wait_us
+= now
->now
- iocg
->wait_since
;
1548 iocg
->wait_since
= 0;
1553 if (!iocg
->wait_since
)
1554 iocg
->wait_since
= now
->now
;
1556 if (WARN_ON_ONCE(ctx
.vbudget
>= 0))
1559 /* determine next wakeup, add a timer margin to guarantee chunking */
1560 vshortage
= -ctx
.vbudget
;
1561 expires
= now
->now_ns
+
1562 DIV64_U64_ROUND_UP(vshortage
, ioc
->vtime_base_rate
) *
1564 expires
+= ioc
->timer_slack_ns
;
1566 /* if already active and close enough, don't bother */
1567 oexpires
= ktime_to_ns(hrtimer_get_softexpires(&iocg
->waitq_timer
));
1568 if (hrtimer_is_queued(&iocg
->waitq_timer
) &&
1569 abs(oexpires
- expires
) <= ioc
->timer_slack_ns
)
1572 hrtimer_start_range_ns(&iocg
->waitq_timer
, ns_to_ktime(expires
),
1573 ioc
->timer_slack_ns
, HRTIMER_MODE_ABS
);
1576 static enum hrtimer_restart
iocg_waitq_timer_fn(struct hrtimer
*timer
)
1578 struct ioc_gq
*iocg
= container_of(timer
, struct ioc_gq
, waitq_timer
);
1579 bool pay_debt
= READ_ONCE(iocg
->abs_vdebt
);
1581 unsigned long flags
;
1583 ioc_now(iocg
->ioc
, &now
);
1585 iocg_lock(iocg
, pay_debt
, &flags
);
1586 iocg_kick_waitq(iocg
, pay_debt
, &now
);
1587 iocg_unlock(iocg
, pay_debt
, &flags
);
1589 return HRTIMER_NORESTART
;
1592 static void ioc_lat_stat(struct ioc
*ioc
, u32
*missed_ppm_ar
, u32
*rq_wait_pct_p
)
1594 u32 nr_met
[2] = { };
1595 u32 nr_missed
[2] = { };
1599 for_each_online_cpu(cpu
) {
1600 struct ioc_pcpu_stat
*stat
= per_cpu_ptr(ioc
->pcpu_stat
, cpu
);
1601 u64 this_rq_wait_ns
;
1603 for (rw
= READ
; rw
<= WRITE
; rw
++) {
1604 u32 this_met
= local_read(&stat
->missed
[rw
].nr_met
);
1605 u32 this_missed
= local_read(&stat
->missed
[rw
].nr_missed
);
1607 nr_met
[rw
] += this_met
- stat
->missed
[rw
].last_met
;
1608 nr_missed
[rw
] += this_missed
- stat
->missed
[rw
].last_missed
;
1609 stat
->missed
[rw
].last_met
= this_met
;
1610 stat
->missed
[rw
].last_missed
= this_missed
;
1613 this_rq_wait_ns
= local64_read(&stat
->rq_wait_ns
);
1614 rq_wait_ns
+= this_rq_wait_ns
- stat
->last_rq_wait_ns
;
1615 stat
->last_rq_wait_ns
= this_rq_wait_ns
;
1618 for (rw
= READ
; rw
<= WRITE
; rw
++) {
1619 if (nr_met
[rw
] + nr_missed
[rw
])
1621 DIV64_U64_ROUND_UP((u64
)nr_missed
[rw
] * MILLION
,
1622 nr_met
[rw
] + nr_missed
[rw
]);
1624 missed_ppm_ar
[rw
] = 0;
1627 *rq_wait_pct_p
= div64_u64(rq_wait_ns
* 100,
1628 ioc
->period_us
* NSEC_PER_USEC
);
1631 /* was iocg idle this period? */
1632 static bool iocg_is_idle(struct ioc_gq
*iocg
)
1634 struct ioc
*ioc
= iocg
->ioc
;
1636 /* did something get issued this period? */
1637 if (atomic64_read(&iocg
->active_period
) ==
1638 atomic64_read(&ioc
->cur_period
))
1641 /* is something in flight? */
1642 if (atomic64_read(&iocg
->done_vtime
) != atomic64_read(&iocg
->vtime
))
1649 * Call this function on the target leaf @iocg's to build pre-order traversal
1650 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1651 * ->walk_list and the caller is responsible for dissolving the list after use.
1653 static void iocg_build_inner_walk(struct ioc_gq
*iocg
,
1654 struct list_head
*inner_walk
)
1658 WARN_ON_ONCE(!list_empty(&iocg
->walk_list
));
1660 /* find the first ancestor which hasn't been visited yet */
1661 for (lvl
= iocg
->level
- 1; lvl
>= 0; lvl
--) {
1662 if (!list_empty(&iocg
->ancestors
[lvl
]->walk_list
))
1666 /* walk down and visit the inner nodes to get pre-order traversal */
1667 while (++lvl
<= iocg
->level
- 1) {
1668 struct ioc_gq
*inner
= iocg
->ancestors
[lvl
];
1670 /* record traversal order */
1671 list_add_tail(&inner
->walk_list
, inner_walk
);
1675 /* propagate the deltas to the parent */
1676 static void iocg_flush_stat_upward(struct ioc_gq
*iocg
)
1678 if (iocg
->level
> 0) {
1679 struct iocg_stat
*parent_stat
=
1680 &iocg
->ancestors
[iocg
->level
- 1]->stat
;
1682 parent_stat
->usage_us
+=
1683 iocg
->stat
.usage_us
- iocg
->last_stat
.usage_us
;
1684 parent_stat
->wait_us
+=
1685 iocg
->stat
.wait_us
- iocg
->last_stat
.wait_us
;
1686 parent_stat
->indebt_us
+=
1687 iocg
->stat
.indebt_us
- iocg
->last_stat
.indebt_us
;
1688 parent_stat
->indelay_us
+=
1689 iocg
->stat
.indelay_us
- iocg
->last_stat
.indelay_us
;
1692 iocg
->last_stat
= iocg
->stat
;
1695 /* collect per-cpu counters and propagate the deltas to the parent */
1696 static void iocg_flush_stat_leaf(struct ioc_gq
*iocg
, struct ioc_now
*now
)
1698 struct ioc
*ioc
= iocg
->ioc
;
1703 lockdep_assert_held(&iocg
->ioc
->lock
);
1705 /* collect per-cpu counters */
1706 for_each_possible_cpu(cpu
) {
1707 abs_vusage
+= local64_read(
1708 per_cpu_ptr(&iocg
->pcpu_stat
->abs_vusage
, cpu
));
1710 vusage_delta
= abs_vusage
- iocg
->last_stat_abs_vusage
;
1711 iocg
->last_stat_abs_vusage
= abs_vusage
;
1713 iocg
->usage_delta_us
= div64_u64(vusage_delta
, ioc
->vtime_base_rate
);
1714 iocg
->stat
.usage_us
+= iocg
->usage_delta_us
;
1716 iocg_flush_stat_upward(iocg
);
1719 /* get stat counters ready for reading on all active iocgs */
1720 static void iocg_flush_stat(struct list_head
*target_iocgs
, struct ioc_now
*now
)
1722 LIST_HEAD(inner_walk
);
1723 struct ioc_gq
*iocg
, *tiocg
;
1725 /* flush leaves and build inner node walk list */
1726 list_for_each_entry(iocg
, target_iocgs
, active_list
) {
1727 iocg_flush_stat_leaf(iocg
, now
);
1728 iocg_build_inner_walk(iocg
, &inner_walk
);
1731 /* keep flushing upwards by walking the inner list backwards */
1732 list_for_each_entry_safe_reverse(iocg
, tiocg
, &inner_walk
, walk_list
) {
1733 iocg_flush_stat_upward(iocg
);
1734 list_del_init(&iocg
->walk_list
);
1739 * Determine what @iocg's hweight_inuse should be after donating unused
1740 * capacity. @hwm is the upper bound and used to signal no donation. This
1741 * function also throws away @iocg's excess budget.
1743 static u32
hweight_after_donation(struct ioc_gq
*iocg
, u32 old_hwi
, u32 hwm
,
1744 u32 usage
, struct ioc_now
*now
)
1746 struct ioc
*ioc
= iocg
->ioc
;
1747 u64 vtime
= atomic64_read(&iocg
->vtime
);
1748 s64 excess
, delta
, target
, new_hwi
;
1750 /* debt handling owns inuse for debtors */
1751 if (iocg
->abs_vdebt
)
1754 /* see whether minimum margin requirement is met */
1755 if (waitqueue_active(&iocg
->waitq
) ||
1756 time_after64(vtime
, now
->vnow
- ioc
->margins
.min
))
1759 /* throw away excess above target */
1760 excess
= now
->vnow
- vtime
- ioc
->margins
.target
;
1762 atomic64_add(excess
, &iocg
->vtime
);
1763 atomic64_add(excess
, &iocg
->done_vtime
);
1765 ioc
->vtime_err
-= div64_u64(excess
* old_hwi
, WEIGHT_ONE
);
	/*
	 * Let's say the distance between the iocg's and the device's vtimes
	 * as a fraction of the period duration is delta.  Assuming that the
	 * iocg will consume the usage determined above, we want to determine
	 * new_hwi so that delta equals MARGIN_TARGET at the end of the next
	 * period.
	 *
	 * We need to execute usage worth of IOs while spending the sum of the
	 * new budget (1 - MARGIN_TARGET) and the leftover from the last
	 * period (delta):
	 *
	 *   usage = (1 - MARGIN_TARGET + delta) * new_hwi
	 *
	 * Therefore, the new_hwi is:
	 *
	 *   new_hwi = usage / (1 - MARGIN_TARGET + delta)
	 */
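	/*
	 * Illustrative numbers: if the iocg used 20% of the device
	 * (usage = 0.2), is currently 30% of a period behind the device
	 * vtime (delta = 0.3) and MARGIN_TARGET is 50%, then
	 * new_hwi = 0.2 / (1 - 0.5 + 0.3) = 0.25, i.e. a 25% hweight_inuse
	 * lets it do the same amount of work while settling at the target
	 * margin.
	 */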
	delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
			  now->vnow - ioc->period_at_vtime);
	target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
	new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);

	return clamp_t(s64, new_hwi, 1, hwm);
1793 * For work-conservation, an iocg which isn't using all of its share should
1794 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1795 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1797 * #1 is mathematically simpler but has the drawback of requiring synchronous
1798 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1799 * change due to donation snapbacks as it has the possibility of grossly
1800 * overshooting what's allowed by the model and vrate.
1802 * #2 is inherently safe with local operations. The donating iocg can easily
1803 * snap back to higher weights when needed without worrying about impacts on
1804 * other nodes as the impacts will be inherently correct. This also makes idle
1805 * iocg activations safe. The only effect activations have is decreasing
1806 * hweight_inuse of others, the right solution to which is for those iocgs to
1807 * snap back to higher weights.
1809 * So, we go with #2. The challenge is calculating how each donating iocg's
1810 * inuse should be adjusted to achieve the target donation amounts. This is done
1811 * using Andy's method described in the following pdf.
1813 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1815 * Given the weights and target after-donation hweight_inuse values, Andy's
1816 * method determines how the proportional distribution should look like at each
1817 * sibling level to maintain the relative relationship between all non-donating
1818 * pairs. To roughly summarize, it divides the tree into donating and
1819 * non-donating parts, calculates global donation rate which is used to
1820 * determine the target hweight_inuse for each node, and then derives per-level
1823 * The following pdf shows that global distribution calculated this way can be
1824 * achieved by scaling inuse weights of donating leaves and propagating the
1825 * adjustments upwards proportionally.
1827 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1829 * Combining the above two, we can determine how each leaf iocg's inuse should
1830 * be adjusted to achieve the target donation.
1832 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1834 * The inline comments use symbols from the last pdf.
1836 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1837 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1838 * t is the sum of the absolute budgets of donating nodes in the subtree.
1839 * w is the weight of the node. w = w_f + w_t
1840 * w_f is the non-donating portion of w. w_f = w * f / b
1841 * w_b is the donating portion of w. w_t = w * t / b
1842 * s is the sum of all sibling weights. s = Sum(w) for siblings
1843 * s_f and s_t are the non-donating and donating portions of s.
1845 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1846 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1847 * after adjustments. Subscript r denotes the root node's values.
1849 static void transfer_surpluses(struct list_head
*surpluses
, struct ioc_now
*now
)
1851 LIST_HEAD(over_hwa
);
1852 LIST_HEAD(inner_walk
);
1853 struct ioc_gq
*iocg
, *tiocg
, *root_iocg
;
1854 u32 after_sum
, over_sum
, over_target
, gamma
;
1857 * It's pretty unlikely but possible for the total sum of
1858 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1859 * confuse the following calculations. If such condition is detected,
1860 * scale down everyone over its full share equally to keep the sum below
1865 list_for_each_entry(iocg
, surpluses
, surplus_list
) {
1868 current_hweight(iocg
, &hwa
, NULL
);
1869 after_sum
+= iocg
->hweight_after_donation
;
1871 if (iocg
->hweight_after_donation
> hwa
) {
1872 over_sum
+= iocg
->hweight_after_donation
;
1873 list_add(&iocg
->walk_list
, &over_hwa
);
1877 if (after_sum
>= WEIGHT_ONE
) {
1879 * The delta should be deducted from the over_sum, calculate
1880 * target over_sum value.
1882 u32 over_delta
= after_sum
- (WEIGHT_ONE
- 1);
1883 WARN_ON_ONCE(over_sum
<= over_delta
);
1884 over_target
= over_sum
- over_delta
;
1889 list_for_each_entry_safe(iocg
, tiocg
, &over_hwa
, walk_list
) {
1891 iocg
->hweight_after_donation
=
1892 div_u64((u64
)iocg
->hweight_after_donation
*
1893 over_target
, over_sum
);
1894 list_del_init(&iocg
->walk_list
);
1898 * Build pre-order inner node walk list and prepare for donation
1899 * adjustment calculations.
1901 list_for_each_entry(iocg
, surpluses
, surplus_list
) {
1902 iocg_build_inner_walk(iocg
, &inner_walk
);
1905 root_iocg
= list_first_entry(&inner_walk
, struct ioc_gq
, walk_list
);
1906 WARN_ON_ONCE(root_iocg
->level
> 0);
1908 list_for_each_entry(iocg
, &inner_walk
, walk_list
) {
1909 iocg
->child_adjusted_sum
= 0;
1910 iocg
->hweight_donating
= 0;
1911 iocg
->hweight_after_donation
= 0;
1915 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1918 list_for_each_entry(iocg
, surpluses
, surplus_list
) {
1919 struct ioc_gq
*parent
= iocg
->ancestors
[iocg
->level
- 1];
1921 parent
->hweight_donating
+= iocg
->hweight_donating
;
1922 parent
->hweight_after_donation
+= iocg
->hweight_after_donation
;
1925 list_for_each_entry_reverse(iocg
, &inner_walk
, walk_list
) {
1926 if (iocg
->level
> 0) {
1927 struct ioc_gq
*parent
= iocg
->ancestors
[iocg
->level
- 1];
1929 parent
->hweight_donating
+= iocg
->hweight_donating
;
1930 parent
->hweight_after_donation
+= iocg
->hweight_after_donation
;
1935 * Calculate inner hwa's (b) and make sure the donation values are
1936 * within the accepted ranges as we're doing low res calculations with
1939 list_for_each_entry(iocg
, &inner_walk
, walk_list
) {
1941 struct ioc_gq
*parent
= iocg
->ancestors
[iocg
->level
- 1];
1943 iocg
->hweight_active
= DIV64_U64_ROUND_UP(
1944 (u64
)parent
->hweight_active
* iocg
->active
,
1945 parent
->child_active_sum
);
1949 iocg
->hweight_donating
= min(iocg
->hweight_donating
,
1950 iocg
->hweight_active
);
1951 iocg
->hweight_after_donation
= min(iocg
->hweight_after_donation
,
1952 iocg
->hweight_donating
- 1);
1953 if (WARN_ON_ONCE(iocg
->hweight_active
<= 1 ||
1954 iocg
->hweight_donating
<= 1 ||
1955 iocg
->hweight_after_donation
== 0)) {
1956 pr_warn("iocg: invalid donation weights in ");
1957 pr_cont_cgroup_path(iocg_to_blkg(iocg
)->blkcg
->css
.cgroup
);
1958 pr_cont(": active=%u donating=%u after=%u\n",
1959 iocg
->hweight_active
, iocg
->hweight_donating
,
1960 iocg
->hweight_after_donation
);
	/*
	 * Calculate the global donation rate (gamma) - the rate to adjust
	 * non-donating budgets by.
	 *
	 * No need to use 64bit multiplication here as the first operand is
	 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
	 *
	 * We know that there are beneficiary nodes and the sum of the donating
	 * hweights can't be whole; however, due to the round-ups during hweight
	 * calculations, root_iocg->hweight_donating might still end up equal to
	 * or greater than whole.  Limit the range when calculating the divider.
	 *
	 *   gamma = (1 - t_r') / (1 - t_r)
	 */
1978 gamma
= DIV_ROUND_UP(
1979 (WEIGHT_ONE
- root_iocg
->hweight_after_donation
) * WEIGHT_ONE
,
1980 WEIGHT_ONE
- min_t(u32
, root_iocg
->hweight_donating
, WEIGHT_ONE
- 1));
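
	/*
	 * Illustrative numbers (not from the original source): with
	 * WEIGHT_ONE = 1<<16, suppose the root's children donate a quarter
	 * of the device (t_r = 0.25 -> hweight_donating ~= 16384) but keep a
	 * tenth as their post-donation share (t_r' = 0.10 ->
	 * hweight_after_donation ~= 6554). Then
	 * gamma = (1 - 0.10) / (1 - 0.25) = 1.2, i.e. every non-donating
	 * budget below gets scaled up by 20% so the freed capacity is shared
	 * out proportionally.
	 */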
	/*
	 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
	 * nodes.
	 */
	list_for_each_entry(iocg, &inner_walk, walk_list) {
		struct ioc_gq *parent;
		u32 inuse, wpt, wptp;
		u64 st, sf;

		if (iocg->level == 0) {
			/* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
			iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
				iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
				WEIGHT_ONE - iocg->hweight_after_donation);
			continue;
		}

		parent = iocg->ancestors[iocg->level - 1];

		/* b' = gamma * b_f + b_t' */
		iocg->hweight_inuse = DIV64_U64_ROUND_UP(
			(u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
			WEIGHT_ONE) + iocg->hweight_after_donation;

		/* w' = s' * b' / b'_p */
		inuse = DIV64_U64_ROUND_UP(
			(u64)parent->child_adjusted_sum * iocg->hweight_inuse,
			parent->hweight_inuse);

		/* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
		st = DIV64_U64_ROUND_UP(
			iocg->child_active_sum * iocg->hweight_donating,
			iocg->hweight_active);
		sf = iocg->child_active_sum - st;
		wpt = DIV64_U64_ROUND_UP(
			(u64)iocg->active * iocg->hweight_donating,
			iocg->hweight_active);
		wptp = DIV64_U64_ROUND_UP(
			(u64)inuse * iocg->hweight_after_donation,
			iocg->hweight_inuse);

		iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
	}
	/*
	 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
	 * we can finally determine leaf adjustments.
	 */
	list_for_each_entry(iocg, surpluses, surplus_list) {
		struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
		u32 inuse;

		/*
		 * In-debt iocgs participated in the donation calculation with
		 * the minimum target hweight_inuse. Configuring inuse
		 * accordingly would work fine but debt handling expects
		 * @iocg->inuse to stay at the minimum and we don't wanna
		 * interfere.
		 */
		if (iocg->abs_vdebt) {
			WARN_ON_ONCE(iocg->inuse > 1);
			continue;
		}

		/* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
		inuse = DIV64_U64_ROUND_UP(
			parent->child_adjusted_sum * iocg->hweight_after_donation,
			parent->hweight_inuse);

		TRACE_IOCG_PATH(inuse_transfer, iocg, now,
				iocg->inuse, inuse,
				iocg->hweight_inuse,
				iocg->hweight_after_donation);

		__propagate_weights(iocg, iocg->active, inuse, true, now);
	}

	/* walk list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
		list_del_init(&iocg->walk_list);
}
/*
 * A low weight iocg can amass a large amount of debt, for example, when
 * anonymous memory gets reclaimed aggressively. If the system has a lot of
 * memory paired with a slow IO device, the debt can span multiple seconds or
 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
 * up blocked paying its debt while the IO device is idle.
 *
 * The following protects against such cases. If the device has been
 * sufficiently idle for a while, the debts are halved and delays are
 * recalculated.
 */
static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
			      struct ioc_now *now)
{
	struct ioc_gq *iocg;
	u64 dur, usage_pct, nr_cycles, nr_cycles_shift;

	/* if no debtor, reset the cycle */
	if (!nr_debtors) {
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc->dfgv_usage_us_sum = 0;
		return;
	}
	/*
	 * Debtors can pass through a lot of writes choking the device and we
	 * don't want to be forgiving debts while the device is struggling from
	 * write bursts. If we're missing latency targets, consider the device
	 * fully utilized.
	 */
	if (ioc->busy_level > 0)
		usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);

	ioc->dfgv_usage_us_sum += usage_us_sum;
	if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
		return;
	/*
	 * At least DFGV_PERIOD has passed since the last period. Calculate the
	 * average usage and reset the period counters.
	 */
	dur = now->now - ioc->dfgv_period_at;
	usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);

	ioc->dfgv_period_at = now->now;
	ioc->dfgv_usage_us_sum = 0;

	/* if was too busy, reset everything */
	if (usage_pct > DFGV_USAGE_PCT) {
		ioc->dfgv_period_rem = 0;
		return;
	}
	/*
	 * Usage is lower than threshold. Let's forgive some debts. Debt
	 * forgiveness runs off of the usual ioc timer but its period usually
	 * doesn't match ioc's. Compensate the difference by performing the
	 * reduction as many times as would fit in the duration since the last
	 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
	 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
	 * reductions is doubled.
	 */
	nr_cycles = dur + ioc->dfgv_period_rem;
	ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 __maybe_unused old_debt, __maybe_unused old_delay;

		if (!iocg->abs_vdebt && !iocg->delay)
			continue;

		spin_lock(&iocg->waitq.lock);

		old_debt = iocg->abs_vdebt;
		old_delay = iocg->delay;

		nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
		if (iocg->abs_vdebt)
			iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;

		if (iocg->delay)
			iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;

		iocg_kick_waitq(iocg, true, now);

		TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
				old_debt, iocg->abs_vdebt,
				old_delay, iocg->delay);

		spin_unlock(&iocg->waitq.lock);
	}
}
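
/*
 * Illustrative numbers (not from the original source): if three DFGV_PERIODs
 * have elapsed with usage below DFGV_USAGE_PCT, nr_cycles is 3 and an iocg's
 * abs_vdebt drops to abs_vdebt >> 3, i.e. an eighth of what it was, but never
 * below 1 so the iocg still counts as indebted until the timer sees it fully
 * paid off.
 */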
/*
 * Check the active iocgs' state to avoid oversleeping and deactivate
 * idle iocgs.
 *
 * Since waiters determine the sleep durations based on the vrate
 * they saw at the time of sleep, if vrate has increased, some
 * waiters could be sleeping for too long. Wake up tardy waiters
 * which should have woken up in the last period and expire idle
 * iocgs.
 */
static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
{
	int nr_debtors = 0;
	struct ioc_gq *iocg, *tiocg;

	list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
		if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
		    !iocg->delay && !iocg_is_idle(iocg))
			continue;

		spin_lock(&iocg->waitq.lock);

		/* flush wait and indebt stat deltas */
		if (iocg->wait_since) {
			iocg->stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = now->now;
		}
		if (iocg->indebt_since) {
			iocg->stat.indebt_us +=
				now->now - iocg->indebt_since;
			iocg->indebt_since = now->now;
		}
		if (iocg->indelay_since) {
			iocg->stat.indelay_us +=
				now->now - iocg->indelay_since;
			iocg->indelay_since = now->now;
		}

		if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
		    iocg->delay) {
			/* might be oversleeping vtime / hweight changes, kick */
			iocg_kick_waitq(iocg, true, now);
			if (iocg->abs_vdebt || iocg->delay)
				nr_debtors++;
		} else if (iocg_is_idle(iocg)) {
			/* no waiter and idle, deactivate */
			u64 vtime = atomic64_read(&iocg->vtime);
			s64 excess;

			/*
			 * @iocg has been inactive for a full duration and will
			 * have a high budget. Account anything above target as
			 * error and throw away. On reactivation, it'll start
			 * with the target budget.
			 */
			excess = now->vnow - vtime - ioc->margins.target;
			if (excess > 0) {
				u32 old_hwi;

				current_hweight(iocg, NULL, &old_hwi);
				ioc->vtime_err -= div64_u64(excess * old_hwi,
							    WEIGHT_ONE);
			}

			TRACE_IOCG_PATH(iocg_idle, iocg, now,
					atomic64_read(&iocg->active_period),
					atomic64_read(&ioc->cur_period), vtime);
			__propagate_weights(iocg, 0, 0, false, now);
			list_del_init(&iocg->active_list);
		}

		spin_unlock(&iocg->waitq.lock);
	}

	commit_weights(ioc);
	return nr_debtors;
}
static void ioc_timer_fn(struct timer_list *timer)
{
	struct ioc *ioc = container_of(timer, struct ioc, timer);
	struct ioc_gq *iocg, *tiocg;
	struct ioc_now now;
	LIST_HEAD(surpluses);
	int nr_debtors, nr_shortages = 0, nr_lagging = 0;
	u64 usage_us_sum = 0;
	u32 ppm_rthr, ppm_wthr;
	u32 missed_ppm[2], rq_wait_pct;
	u64 period_vtime;
	int prev_busy_level;

	/* how were the latencies during the period? */
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);

	/* take care of active iocgs */
	spin_lock_irq(&ioc->lock);

	ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
	ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	ioc_now(ioc, &now);

	period_vtime = now.vnow - ioc->period_at_vtime;
	if (WARN_ON_ONCE(!period_vtime)) {
		spin_unlock_irq(&ioc->lock);
		return;
	}

	nr_debtors = ioc_check_iocgs(ioc, &now);

	/*
	 * Wait and indebt stat are flushed above and the donation calculation
	 * below needs updated usage stat. Let's bring stat up-to-date.
	 */
	iocg_flush_stat(&ioc->active_iocgs, &now);
	/* calc usage and see whether some weights need to be moved around */
	list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
		u64 vdone, vtime, usage_us;
		u32 hw_active, hw_inuse;

		/*
		 * Collect unused and wind vtime closer to vnow to prevent
		 * iocgs from accumulating a large amount of budget.
		 */
		vdone = atomic64_read(&iocg->done_vtime);
		vtime = atomic64_read(&iocg->vtime);
		current_hweight(iocg, &hw_active, &hw_inuse);

		/*
		 * Latency QoS detection doesn't account for IOs which are
		 * in-flight for longer than a period. Detect them by
		 * comparing vdone against period start. If lagging behind
		 * IOs from past periods, don't increase vrate.
		 */
		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		    time_after64(vtime, vdone) &&
		    time_after64(vtime, now.vnow -
				 MAX_LAGGING_PERIODS * period_vtime) &&
		    time_before64(vdone, now.vnow - period_vtime))
			nr_lagging++;

		/*
		 * Determine absolute usage factoring in in-flight IOs to avoid
		 * high-latency completions appearing as idle.
		 */
		usage_us = iocg->usage_delta_us;
		usage_us_sum += usage_us;

		/* see whether there's surplus vtime */
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
		if (hw_inuse < hw_active ||
		    (!waitqueue_active(&iocg->waitq) &&
		     time_before64(vtime, now.vnow - ioc->margins.low))) {
			u32 hwa, old_hwi, hwm, new_hwi, usage;
			u64 usage_dur;

			if (vdone != vtime) {
				u64 inflight_us = DIV64_U64_ROUND_UP(
					cost_to_abs_cost(vtime - vdone, hw_inuse),
					ioc->vtime_base_rate);

				usage_us = max(usage_us, inflight_us);
			}

			/* convert to hweight based usage ratio */
			if (time_after64(iocg->activated_at, ioc->period_at))
				usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
			else
				usage_dur = max_t(u64, now.now - ioc->period_at, 1);

			usage = clamp_t(u32,
				DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
						   usage_dur),
				1, WEIGHT_ONE);

			/*
			 * Already donating or accumulated enough to start.
			 * Determine the donation amount.
			 */
			current_hweight(iocg, &hwa, &old_hwi);
			hwm = current_hweight_max(iocg);
			new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
							 usage, &now);
			/*
			 * Donation calculation assumes hweight_after_donation
			 * to be positive, a condition that a donor w/ hwa < 2
			 * can't meet. Don't bother with donation if hwa is
			 * below 2. It's not gonna make a meaningful difference
			 * anyway.
			 */
			if (new_hwi < hwm && hwa >= 2) {
				iocg->hweight_donating = hwa;
				iocg->hweight_after_donation = new_hwi;
				list_add(&iocg->surplus_list, &surpluses);
			} else if (!iocg->abs_vdebt) {
				/*
				 * @iocg doesn't have enough to donate. Reset
				 * its inuse to active.
				 *
				 * Don't reset debtors as their inuse's are
				 * owned by debt handling. This shouldn't affect
				 * donation calculation in any meaningful way
				 * as @iocg doesn't have a meaningful amount of
				 * share anyway.
				 */
				TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
						iocg->inuse, iocg->active,
						iocg->hweight_inuse, new_hwi);

				__propagate_weights(iocg, iocg->active,
						    iocg->active, true, &now);
				nr_shortages++;
			}
		} else {
			/* genuinely short on vtime */
			nr_shortages++;
		}
	}
	if (!list_empty(&surpluses) && nr_shortages)
		transfer_surpluses(&surpluses, &now);

	commit_weights(ioc);

	/* surplus list should be dissolved after use */
	list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
		list_del_init(&iocg->surplus_list);
	/*
	 * If q is getting clogged or we're missing too much, we're issuing
	 * too much IO and should lower vtime rate. If we're not missing
	 * and experiencing shortages but not surpluses, we're too stingy
	 * and should increase vtime rate.
	 */
	prev_busy_level = ioc->busy_level;
	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
	    missed_ppm[READ] > ppm_rthr ||
	    missed_ppm[WRITE] > ppm_wthr) {
		/* clearly missing QoS targets, slow down vrate */
		ioc->busy_level = max(ioc->busy_level, 0);
		ioc->busy_level++;
	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
		/* QoS targets are being met with >25% margin */
		if (nr_shortages) {
			/*
			 * We're throttling while the device has spare
			 * capacity. If vrate was being slowed down, stop.
			 */
			ioc->busy_level = min(ioc->busy_level, 0);

			/*
			 * If there are IOs spanning multiple periods, wait
			 * them out before pushing the device harder.
			 */
			if (!nr_lagging)
				ioc->busy_level--;
		} else {
			/*
			 * Nobody is being throttled and the users aren't
			 * issuing enough IOs to saturate the device. We
			 * simply don't know how close the device is to
			 * saturation. Coast.
			 */
			ioc->busy_level = 0;
		}
	} else {
		/* inside the hysteresis margin, we're good */
		ioc->busy_level = 0;
	}

	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);

	ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
			      prev_busy_level, missed_ppm);

	ioc_refresh_params(ioc, false);

	ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);

	/*
	 * This period is done. Move onto the next one. If nothing's
	 * going on with the device, stop the timer.
	 */
	atomic64_inc(&ioc->cur_period);

	if (ioc->running != IOC_STOP) {
		if (!list_empty(&ioc->active_iocgs)) {
			ioc_start_period(ioc, &now);
		} else {
			ioc->busy_level = 0;
			ioc->vtime_err = 0;
			ioc->running = IOC_IDLE;
		}

		ioc_refresh_vrate(ioc, &now);
	}

	spin_unlock_irq(&ioc->lock);
}
static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
				      u64 abs_cost, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct ioc_margins *margins = &ioc->margins;
	u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
	u32 hwi, adj_step;
	s64 margin;
	u64 cost, new_inuse;
	unsigned long flags;

	current_hweight(iocg, NULL, &hwi);
	old_hwi = hwi;
	cost = abs_cost_to_cost(abs_cost, hwi);
	margin = now->vnow - vtime - cost;

	/* debt handling owns inuse for debtors */
	if (iocg->abs_vdebt)
		return cost;

	/*
	 * We only increase inuse during period and do so if the margin has
	 * deteriorated since the previous adjustment.
	 */
	if (margin >= iocg->saved_margin || margin >= margins->low ||
	    iocg->inuse == iocg->active)
		return cost;

	spin_lock_irqsave(&ioc->lock, flags);

	/* we own inuse only when @iocg is in the normal active state */
	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
		spin_unlock_irqrestore(&ioc->lock, flags);
		return cost;
	}

	/*
	 * Bump up inuse till @abs_cost fits in the existing budget.
	 * adj_step must be determined after acquiring ioc->lock - we might
	 * have raced and lost to another thread for activation and could
	 * be reading 0 iocg->active before ioc->lock which will lead to
	 * an infinite loop.
	 */
	new_inuse = iocg->inuse;
	adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
	do {
		new_inuse = new_inuse + adj_step;
		propagate_weights(iocg, iocg->active, new_inuse, true, now);
		current_hweight(iocg, NULL, &hwi);
		cost = abs_cost_to_cost(abs_cost, hwi);
	} while (time_after64(vtime + cost, now->vnow) &&
		 iocg->inuse != iocg->active);

	spin_unlock_irqrestore(&ioc->lock, flags);

	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
			old_inuse, iocg->inuse, old_hwi, hwi);

	return cost;
}
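
/*
 * Illustrative numbers (not from the original source): if INUSE_ADJ_STEP_PCT
 * were 25 and active were 400, adj_step would be 100, so a bio that doesn't
 * fit the current budget bumps inuse a quarter of active at a time - 100 ->
 * 200 -> 300 -> 400 - re-evaluating the cost after each bump until the bio
 * fits or inuse reaches active.
 */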
static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
				    bool is_merge, u64 *costp)
{
	struct ioc *ioc = iocg->ioc;
	u64 coef_seqio, coef_randio, coef_page;
	u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
	u64 seek_pages = 0;
	u64 cost = 0;

	/* Can't calculate cost for empty bio */
	if (!bio->bi_iter.bi_size)
		goto out;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		coef_seqio	= ioc->params.lcoefs[LCOEF_RSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_RRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		coef_seqio	= ioc->params.lcoefs[LCOEF_WSEQIO];
		coef_randio	= ioc->params.lcoefs[LCOEF_WRANDIO];
		coef_page	= ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		goto out;
	}

	if (iocg->cursor) {
		seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
		seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
	}

	if (!is_merge) {
		if (seek_pages > LCOEF_RANDIO_PAGES) {
			cost += coef_randio;
		} else {
			cost += coef_seqio;
		}
	}

	cost += pages * coef_page;
out:
	*costp = cost;
}
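
/*
 * Illustrative numbers (not from the original source): a 256KiB read that
 * seeks farther than LCOEF_RANDIO_PAGES from the previous end sector is
 * charged coef_randio + 64 * coef_page of vtime (64 pages of 4KiB); the same
 * read issued contiguously is charged coef_seqio + 64 * coef_page instead.
 */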
static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
{
	u64 cost;

	calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
	return cost;
}
static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
					 u64 *costp)
{
	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
		break;
	case REQ_OP_WRITE:
		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
		break;
	default:
		*costp = 0;
	}
}

static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
{
	u64 cost;

	calc_size_vtime_cost_builtin(rq, ioc, &cost);
	return cost;
}
static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_gq *iocg = blkg_to_iocg(blkg);
	struct ioc_now now;
	struct iocg_wait wait;
	u64 abs_cost, cost, vtime;
	bool use_debt, ioc_locked;
	unsigned long flags;

	/* bypass IOs if disabled, still initializing, or for root cgroup */
	if (!ioc->enabled || !iocg || !iocg->level)
		return;

	/* calculate the absolute vtime cost */
	abs_cost = calc_vtime_cost(bio, iocg, false);
	if (!abs_cost)
		return;

	if (!iocg_activate(iocg, &now))
		return;

	iocg->cursor = bio_end_sector(bio);
	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);

	/*
	 * If no one's waiting and within budget, issue right away. The
	 * tests are racy but the races aren't systemic - we only miss once
	 * in a while which is fine.
	 */
	if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
	    time_before_eq64(vtime + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * We're over budget. This can be handled in two ways. IOs which may
	 * cause priority inversions are punted to @ioc->aux_iocg and charged as
	 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
	 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
	 * whether debt handling is needed and acquire locks accordingly.
	 */
	use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
	ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
retry_lock:
	iocg_lock(iocg, ioc_locked, &flags);

	/*
	 * @iocg must stay activated for debt and waitq handling. Deactivation
	 * is synchronized against both ioc->lock and waitq.lock and we won't
	 * get deactivated as long as we're waiting or has debt, so we're good
	 * if we're activated here. In the unlikely cases that we aren't, just
	 * issue the IO.
	 */
	if (unlikely(list_empty(&iocg->active_list))) {
		iocg_unlock(iocg, ioc_locked, &flags);
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * We're over budget. If @bio has to be issued regardless, remember
	 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
	 * off the debt before waking more IOs.
	 *
	 * This way, the debt is continuously paid off each period with the
	 * actual budget available to the cgroup. If we just wound vtime, we
	 * would incorrectly use the current hw_inuse for the entire amount
	 * which, for example, can lead to the cgroup staying blocked for a
	 * long time even with substantially raised hw_inuse.
	 *
	 * An iocg with vdebt should stay online so that the timer can keep
	 * deducting its vdebt and [de]activate use_delay mechanism
	 * accordingly. We don't want to race against the timer trying to
	 * clear them and leave @iocg inactive w/ dangling use_delay heavily
	 * penalizing the cgroup and its descendants.
	 */
	if (use_debt) {
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->disk,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		iocg_unlock(iocg, ioc_locked, &flags);
		return;
	}

	/* guarantee that iocgs w/ waiters have maximum inuse */
	if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
		if (!ioc_locked) {
			iocg_unlock(iocg, false, &flags);
			ioc_locked = true;
			goto retry_lock;
		}

		propagate_weights(iocg, iocg->active, iocg->active, true,
				  &now);
	}

	/*
	 * Append self to the waitq and schedule the wakeup timer if we're
	 * the first waiter. The timer duration is calculated based on the
	 * current vrate. vtime and hweight changes can make it too short
	 * or too long. Each wait entry records the absolute cost it's
	 * waiting for to allow re-evaluation using a custom wait entry.
	 *
	 * If too short, the timer simply reschedules itself. If too long,
	 * the period timer will notice and trigger wakeups.
	 *
	 * All waiters are on iocg->waitq and the wait states are
	 * synchronized using waitq.lock.
	 */
	init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
	wait.wait.private = current;
	wait.bio = bio;
	wait.abs_cost = abs_cost;
	wait.committed = false;	/* will be set true by waker */

	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
	iocg_kick_waitq(iocg, ioc_locked, &now);

	iocg_unlock(iocg, ioc_locked, &flags);

	while (true) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (wait.committed)
			break;
		io_schedule();
	}

	/* waker already committed us, proceed */
	finish_wait(&iocg->waitq, &wait.wait);
}
static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = rqos_to_ioc(rqos);
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u64 vtime, abs_cost, cost;
	unsigned long flags;

	/* bypass if disabled, still initializing, or for root cgroup */
	if (!ioc->enabled || !iocg || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);

	vtime = atomic64_read(&iocg->vtime);
	cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request has
	 * cost assigned.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
		return;
	}

	/*
	 * Otherwise, account it as debt if @iocg is online, which it should
	 * be for the vast majority of cases. See debt handling in
	 * ioc_rqos_throttle() for details.
	 */
	spin_lock_irqsave(&ioc->lock, flags);
	spin_lock(&iocg->waitq.lock);

	if (likely(!list_empty(&iocg->active_list))) {
		iocg_incur_debt(iocg, abs_cost, &now);
		if (iocg_kick_delay(iocg, &now))
			blkcg_schedule_throttle(rqos->disk,
					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
	} else {
		iocg_commit_bio(iocg, bio, abs_cost, cost);
	}

	spin_unlock(&iocg->waitq.lock);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
}
static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	struct ioc_pcpu_stat *ccs;
	u64 on_q_ns, rq_wait_ns, size_nsec;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq)) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = blk_time_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);

	ccs = get_cpu_ptr(ioc->pcpu_stat);

	if (on_q_ns <= size_nsec ||
	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		local_inc(&ccs->missed[rw].nr_met);
	else
		local_inc(&ccs->missed[rw].nr_missed);

	local64_add(rq_wait_ns, &ccs->rq_wait_ns);

	put_cpu_ptr(ccs);
}
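
/*
 * Illustrative numbers (not from the original source): a read that spent
 * 700us from allocation to completion with a 200us size cost is counted
 * against a 500us rlat target as met (700 - 200 <= 500); had it taken 900us
 * it would be counted as missed and feed into missed_ppm for the next
 * period's vrate decision.
 */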
static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}
static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	timer_shutdown_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}
static const struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};
static int blk_iocost_init(struct gendisk *disk)
{
	struct ioc *ioc;
	int i, cpu, ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
			local_set(&ccs->missed[i].nr_met, 0);
			local_set(&ccs->missed[i].nr_missed, 0);
		}
		local64_set(&ccs->rq_wait_ns, 0);
	}

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	ioc->vtime_base_rate = VTIME_PER_USEC;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(blk_time_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params_disk(ioc, true, disk);
	spin_unlock_irq(&ioc->lock);

	/*
	 * rqos must be added before activation to allow ioc_pd_init() to
	 * lookup the ioc from q. This means that the rqos methods may get
	 * called before policy activation completion, can't assume that the
	 * target bio has an iocg associated and need to test for NULL iocg.
	 */
	ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
	if (ret)
		goto err_free_ioc;

	ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
	if (ret)
		goto err_del_qos;
	return 0;

err_del_qos:
	rq_qos_del(&ioc->rqos);
err_free_ioc:
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
	return ret;
}
static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

static struct blkg_policy_data *ioc_pd_alloc(struct gendisk *disk,
					     struct blkcg *blkcg, gfp_t gfp)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp,
			    disk->node_id);
	if (!iocg)
		return NULL;

	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
	if (!iocg->pcpu_stat) {
		kfree(iocg);
		return NULL;
	}

	return &iocg->pd;
}
static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	INIT_LIST_HEAD(&iocg->walk_list);
	INIT_LIST_HEAD(&iocg->surplus_list);
	iocg->hweight_active = WEIGHT_ONE;
	iocg->hweight_inuse = WEIGHT_ONE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg, &now);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);

		if (!list_empty(&iocg->active_list)) {
			struct ioc_now now;

			ioc_now(ioc, &now);
			propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
		}

		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));

		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
	}
	free_percpu(iocg->pcpu_stat);
	kfree(iocg);
}
static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;

	if (!ioc->enabled)
		return;

	if (iocg->level == 0) {
		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
			ioc->vtime_base_rate * 10000,
			VTIME_PER_USEC);
		seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
	}

	seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);

	if (blkcg_debug_stats)
		seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
			   iocg->last_stat.wait_us,
			   iocg->last_stat.indebt_us,
			   iocg->last_stat.indelay_us);
}
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}
ioc_weight_write(struct kernfs_open_file
*of
, char *buf
,
3091 size_t nbytes
, loff_t off
)
3093 struct blkcg
*blkcg
= css_to_blkcg(of_css(of
));
3094 struct ioc_cgrp
*iocc
= blkcg_to_iocc(blkcg
);
3095 struct blkg_conf_ctx ctx
;
3097 struct ioc_gq
*iocg
;
3101 if (!strchr(buf
, ':')) {
3102 struct blkcg_gq
*blkg
;
3104 if (!sscanf(buf
, "default %u", &v
) && !sscanf(buf
, "%u", &v
))
3107 if (v
< CGROUP_WEIGHT_MIN
|| v
> CGROUP_WEIGHT_MAX
)
3110 spin_lock_irq(&blkcg
->lock
);
3111 iocc
->dfl_weight
= v
* WEIGHT_ONE
;
3112 hlist_for_each_entry(blkg
, &blkcg
->blkg_list
, blkcg_node
) {
3113 struct ioc_gq
*iocg
= blkg_to_iocg(blkg
);
3116 spin_lock(&iocg
->ioc
->lock
);
3117 ioc_now(iocg
->ioc
, &now
);
3118 weight_updated(iocg
, &now
);
3119 spin_unlock(&iocg
->ioc
->lock
);
3122 spin_unlock_irq(&blkcg
->lock
);
3127 blkg_conf_init(&ctx
, buf
);
3129 ret
= blkg_conf_prep(blkcg
, &blkcg_policy_iocost
, &ctx
);
3133 iocg
= blkg_to_iocg(ctx
.blkg
);
3135 if (!strncmp(ctx
.body
, "default", 7)) {
3138 if (!sscanf(ctx
.body
, "%u", &v
))
3140 if (v
< CGROUP_WEIGHT_MIN
|| v
> CGROUP_WEIGHT_MAX
)
3144 spin_lock(&iocg
->ioc
->lock
);
3145 iocg
->cfg_weight
= v
* WEIGHT_ONE
;
3146 ioc_now(iocg
->ioc
, &now
);
3147 weight_updated(iocg
, &now
);
3148 spin_unlock(&iocg
->ioc
->lock
);
3150 blkg_conf_exit(&ctx
);
3156 blkg_conf_exit(&ctx
);
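
/*
 * Usage sketch (not part of the kernel source; paths assume cgroup2 mounted
 * at /sys/fs/cgroup and the device being 8:16):
 *
 *   echo "default 100"  > /sys/fs/cgroup/workload/io.weight
 *   echo "8:16 200"     > /sys/fs/cgroup/workload/io.weight
 *   echo "8:16 default" > /sys/fs/cgroup/workload/io.weight
 *
 * The first line sets the cgroup-wide default weight, the second overrides
 * it for one device, and the third drops the per-device override again.
 */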
static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	spin_lock(&ioc->lock);
	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	spin_unlock(&ioc->lock);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}
static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct blkg_conf_ctx ctx;
	struct gendisk *disk;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *body, *p;
	int ret;

	blkg_conf_init(&ctx, input);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto err;

	body = ctx.body;
	disk = ctx.bdev->bd_disk;
	if (!queue_is_mq(disk->queue)) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	blk_mq_freeze_queue(disk->queue);
	blk_mq_quiesce_queue(disk->queue);

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;

	while ((p = strsep(&body, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			if (match_u64(&args[0], &v))
				goto einval;
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	if (enable && !ioc->enabled) {
		blk_stat_enable_accounting(disk->queue);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
		ioc->enabled = true;
	} else if (!enable && ioc->enabled) {
		blk_stat_disable_accounting(disk->queue);
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	if (enable)
		wbt_disable_default(disk);
	else
		wbt_enable_default(disk);

	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue);

	blkg_conf_exit(&ctx);
	return nbytes;

einval:
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(disk->queue);
	blk_mq_unfreeze_queue(disk->queue);

	ret = -EINVAL;
err:
	blkg_conf_exit(&ctx);
	return ret;
}
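
/*
 * Usage sketch (not part of the kernel source; the device number is made
 * up):
 *
 *   echo "8:16 enable=1 rpct=95.00 rlat=10000 wpct=95.00 wlat=50000" \
 *       > /sys/fs/cgroup/io.cost.qos
 *
 * enables the controller on 8:16 and asks that 95% of reads complete within
 * 10ms and 95% of writes within 50ms; min= and max= bound how far vrate may
 * be scaled in response.
 */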
static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	spin_lock(&ioc->lock);
	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	spin_unlock(&ioc->lock);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}
= {
3390 { COST_CTRL
, "ctrl=%s" },
3391 { COST_MODEL
, "model=%s" },
3392 { NR_COST_CTRL_PARAMS
, NULL
},
3395 static const match_table_t i_lcoef_tokens
= {
3396 { I_LCOEF_RBPS
, "rbps=%u" },
3397 { I_LCOEF_RSEQIOPS
, "rseqiops=%u" },
3398 { I_LCOEF_RRANDIOPS
, "rrandiops=%u" },
3399 { I_LCOEF_WBPS
, "wbps=%u" },
3400 { I_LCOEF_WSEQIOPS
, "wseqiops=%u" },
3401 { I_LCOEF_WRANDIOPS
, "wrandiops=%u" },
3402 { NR_I_LCOEFS
, NULL
},
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct blkg_conf_ctx ctx;
	struct request_queue *q;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *body, *p;
	int ret;

	blkg_conf_init(&ctx, input);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto err;

	body = ctx.body;
	q = bdev_get_queue(ctx.bdev);
	if (!queue_is_mq(q)) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	ioc = q_to_ioc(q);
	if (!ioc) {
		ret = blk_iocost_init(ctx.bdev->bd_disk);
		if (ret)
			goto err;
		ioc = q_to_ioc(q);
	}

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;

	while ((p = strsep(&body, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	blkg_conf_exit(&ctx);
	return nbytes;

einval:
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	ret = -EINVAL;
err:
	blkg_conf_exit(&ctx);
	return ret;
}
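
/*
 * Usage sketch (not part of the kernel source; the coefficients are made-up
 * round numbers - tools/cgroup/iocost_coef_gen.py can derive real ones):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2000000000 rseqiops=200000" \
 *        "rrandiops=150000 wbps=1000000000 wseqiops=100000 wrandiops=50000" \
 *       > /sys/fs/cgroup/io.cost.model
 *
 * switches the device to user-supplied linear model coefficients; writing
 * ctrl=auto reverts to the built-in per-class defaults.
 */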
static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}	/* terminate */
};
static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
	.pd_stat_fn	= ioc_pd_stat,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);