/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric.  This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution.  While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery.  For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size.  If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * IO cost model estimates the cost of an IO given its basic parameters and
 * history (e.g. the end sector of the last IO).  The cost is measured in
 * device time.  If a given IO is estimated to cost 10ms, the device should
 * be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear.  Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added.  While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough.  Default
 * parameters for several different classes of devices are provided and the
 * parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
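 *
 * For illustration (the coefficients here simply mirror the builtin SSD
 * defaults further down in this file, and 8:16 stands in for the target
 * device's MAJ:MIN), a user-specified linear model could be installed
 * with:
 *
 *   echo "8:16 ctrl=user model=linear rbps=488636629 rseqiops=8932 \
 *     rrandiops=8518 wbps=427891549 wseqiops=28755 wrandiops=21940" \
 *     > /sys/fs/cgroup/io.cost.model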
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated.  Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *           root
 *         /      \
 *      A (w:100)  B (w:300)
 *      /      \
 *  A0 (w:100)  A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share.  If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each.  The distribution mechanism only cares about these flattened
 * shares.  They're called hweights (hierarchical weights) and always add
 * up to 1 (HWEIGHT_WHOLE).
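 *
 * For the hierarchy above, the flattened shares can be read off level by
 * level: A0's hweight is A's share of root times A0's share of A, i.e.
 * 100/(100+300) * 100/(100+100) = 0.25 * 0.5 = 0.125, matching the 12.5%
 * quoted above, while B's is simply 300/(100+300) = 0.75.  The hweights of
 * all active leaves always sum to 1.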
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
 * against the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution.  Each cgroup's
 * vtime is running at a rate determined by its hweight.  A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO iff doing so
 * wouldn't outrun the current device vtime.  Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
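 *
 * Concretely, continuing the example above, if A0's vtime is currently
 * 30ms behind the device vtime, A0 may immediately issue IOs whose
 * hweight-scaled cost totals up to 30ms; anything beyond that is parked on
 * A0's waitqueue until the device vtime catches up.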
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect.  There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection.  The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is.  If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down.  If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * generally speed up.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock.  For example, if the vtime is running
 * at the vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available.  When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate.  This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality.  For a better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if the N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service.  There is an inherent trade-off - the tighter the latency QoS,
 * the more bandwidth is sacrificed.  Latency QoS is disabled by default
 * and can be set through /sys/fs/cgroup/io.cost.qos.
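 *
 * For example (the values are illustrative), the following would treat
 * device 8:16 as saturated whenever the 95th percentile read completion
 * latency exceeds 25ms or the 95th percentile write completion latency
 * exceeds 75ms:
 *
 *   echo "8:16 enable=1 rpct=95.00 rlat=25000 wpct=95.00 wlat=75000" \
 *     > /sys/fs/cgroup/io.cost.qos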
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights.  A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own.  Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity.  The naive
 * distribution of half and half would lead to 60% utilization of the
 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition.  This is too high a cost to pay
 * for IO control.
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it.  In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
 *
 * Note that adjusting down surplus weights has the same effects as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically.  However, working out who
 * can donate and who should take back how much requires hweight
 * propagation anyway, so it's simpler to implement and understand as a
 * separate mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py.  For details on drgn, please see
 * https://github.com/osandov/drgn.  The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
 *
 * - per	: Timer period
 * - cur_per	: Internal wall and device vtime clock
 * - vrate	: Device virtual time rate against wall clock
 * - weight	: Surplus-adjusted and configured weights
 * - hweight	: Surplus-adjusted and configured hierarchical weights
 * - inflt	: The percentage of in-flight IO cost at the end of last period
 * - del_ms	: Deferred issuer delay induction level and duration
 * - usages	: Usage history
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-wbt.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */
213 enum {
214 MILLION = 1000000,
216 /* timer period is calculated from latency requirements, bound it */
217 MIN_PERIOD = USEC_PER_MSEC,
218 MAX_PERIOD = USEC_PER_SEC,
221 * A cgroup's vtime can run 50% behind the device vtime, which
222 * serves as its IO credit buffer. Surplus weight adjustment is
223 * immediately canceled if the vtime margin runs below 10%.
225 MARGIN_PCT = 50,
226 INUSE_MARGIN_PCT = 10,
228 /* Have some play in waitq timer operations */
229 WAITQ_TIMER_MARGIN_PCT = 5,
232 * vtime can wrap well within a reasonable uptime when vrate is
233 * consistently raised. Don't trust recorded cgroup vtime if the
234 * period counter indicates that it's older than 5mins.
236 VTIME_VALID_DUR = 300 * USEC_PER_SEC,
239 * Remember the past three non-zero usages and use the max for
240 * surplus calculation. Three slots guarantee that we remember one
241 * full period usage from the last active stretch even after
242 * partial deactivation and re-activation periods. Don't start
243 * giving away weight before collecting two data points to prevent
244 * hweight adjustments based on one partial activation period.
246 NR_USAGE_SLOTS = 3,
247 MIN_VALID_USAGES = 2,
249 /* 1/64k is granular enough and can easily be handled w/ u32 */
250 HWEIGHT_WHOLE = 1 << 16,
	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision.  For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy.  At the same time, it should be able to
	 * represent reasonably long enough durations to be useful and
	 * convenient during operation.
	 *
	 * 1s worth of vtime is 2^37.  This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
	 */
263 VTIME_PER_SEC_SHIFT = 37,
264 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
265 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
267 /* bound vrate adjustments within two orders of magnitude */
268 VRATE_MIN_PPM = 10000, /* 1% */
269 VRATE_MAX_PPM = 100000000, /* 10000% */
271 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
272 VRATE_CLAMP_ADJ_PCT = 4,
274 /* if IOs end up waiting for requests, issue less */
275 RQ_WAIT_BUSY_PCT = 5,
	/* unbusy hysteresis */
	UNBUSY_THR_PCT		= 75,
280 /* don't let cmds which take a very long time pin lagging for too long */
281 MAX_LAGGING_PERIODS = 10,
284 * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
285 * donate the surplus.
287 SURPLUS_SCALE_PCT = 125, /* * 125% */
288 SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
289 SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
291 /* switch iff the conditions are met for longer than this */
292 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
	/*
	 * Count IO size in 4k pages.  The 12bit shift helps keep the
	 * size-proportional components of the cost calculation in a similar
	 * number of digits to the per-IO cost components.
	 */
299 IOC_PAGE_SHIFT = 12,
300 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
301 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
303 /* if apart further than 16M, consider randio for linear model */
304 LCOEF_RANDIO_PAGES = 4096,
307 enum ioc_running {
308 IOC_IDLE,
309 IOC_RUNNING,
310 IOC_STOP,
313 /* io.cost.qos controls including per-dev enable of the whole controller */
314 enum {
315 QOS_ENABLE,
316 QOS_CTRL,
317 NR_QOS_CTRL_PARAMS,
320 /* io.cost.qos params */
321 enum {
322 QOS_RPPM,
323 QOS_RLAT,
324 QOS_WPPM,
325 QOS_WLAT,
326 QOS_MIN,
327 QOS_MAX,
328 NR_QOS_PARAMS,
331 /* io.cost.model controls */
332 enum {
333 COST_CTRL,
334 COST_MODEL,
335 NR_COST_CTRL_PARAMS,
338 /* builtin linear cost model coefficients */
339 enum {
340 I_LCOEF_RBPS,
341 I_LCOEF_RSEQIOPS,
342 I_LCOEF_RRANDIOPS,
343 I_LCOEF_WBPS,
344 I_LCOEF_WSEQIOPS,
345 I_LCOEF_WRANDIOPS,
346 NR_I_LCOEFS,
349 enum {
350 LCOEF_RPAGE,
351 LCOEF_RSEQIO,
352 LCOEF_RRANDIO,
353 LCOEF_WPAGE,
354 LCOEF_WSEQIO,
355 LCOEF_WRANDIO,
356 NR_LCOEFS,
359 enum {
360 AUTOP_INVALID,
361 AUTOP_HDD,
362 AUTOP_SSD_QD1,
363 AUTOP_SSD_DFL,
364 AUTOP_SSD_FAST,
367 struct ioc_gq;
369 struct ioc_params {
370 u32 qos[NR_QOS_PARAMS];
371 u64 i_lcoefs[NR_I_LCOEFS];
372 u64 lcoefs[NR_LCOEFS];
373 u32 too_fast_vrate_pct;
374 u32 too_slow_vrate_pct;
377 struct ioc_missed {
378 u32 nr_met;
379 u32 nr_missed;
380 u32 last_met;
381 u32 last_missed;
384 struct ioc_pcpu_stat {
385 struct ioc_missed missed[2];
387 u64 rq_wait_ns;
388 u64 last_rq_wait_ns;
391 /* per device */
392 struct ioc {
393 struct rq_qos rqos;
395 bool enabled;
397 struct ioc_params params;
398 u32 period_us;
399 u32 margin_us;
400 u64 vrate_min;
401 u64 vrate_max;
403 spinlock_t lock;
404 struct timer_list timer;
405 struct list_head active_iocgs; /* active cgroups */
406 struct ioc_pcpu_stat __percpu *pcpu_stat;
408 enum ioc_running running;
409 atomic64_t vtime_rate;
411 seqcount_t period_seqcount;
412 u32 period_at; /* wallclock starttime */
413 u64 period_at_vtime; /* vtime starttime */
415 atomic64_t cur_period; /* inc'd each period */
416 int busy_level; /* saturation history */
418 u64 inuse_margin_vtime;
419 bool weights_updated;
420 atomic_t hweight_gen; /* for lazy hweights */
422 u64 autop_too_fast_at;
423 u64 autop_too_slow_at;
424 int autop_idx;
425 bool user_qos_params:1;
426 bool user_cost_model:1;
429 /* per device-cgroup pair */
430 struct ioc_gq {
431 struct blkg_policy_data pd;
432 struct ioc *ioc;
435 * A iocg can get its weight from two sources - an explicit
436 * per-device-cgroup configuration or the default weight of the
437 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration.  `weight` is the effective weight considering both
439 * sources.
441 * When an idle cgroup becomes active its `active` goes from 0 to
442 * `weight`. `inuse` is the surplus adjusted active weight.
443 * `active` and `inuse` are used to calculate `hweight_active` and
444 * `hweight_inuse`.
446 * `last_inuse` remembers `inuse` while an iocg is idle to persist
447 * surplus adjustments.
449 u32 cfg_weight;
450 u32 weight;
451 u32 active;
452 u32 inuse;
453 u32 last_inuse;
455 sector_t cursor; /* to detect randio */
458 * `vtime` is this iocg's vtime cursor which progresses as IOs are
459 * issued. If lagging behind device vtime, the delta represents
	 * the currently available IO budget.  If running ahead, the
461 * overage.
463 * `vtime_done` is the same but progressed on completion rather
464 * than issue. The delta behind `vtime` represents the cost of
465 * currently in-flight IOs.
467 * `last_vtime` is used to remember `vtime` at the end of the last
468 * period to calculate utilization.
470 atomic64_t vtime;
471 atomic64_t done_vtime;
472 u64 abs_vdebt;
473 u64 last_vtime;
476 * The period this iocg was last active in. Used for deactivation
477 * and invalidating `vtime`.
479 atomic64_t active_period;
480 struct list_head active_list;
482 /* see __propagate_active_weight() and current_hweight() for details */
483 u64 child_active_sum;
484 u64 child_inuse_sum;
485 int hweight_gen;
486 u32 hweight_active;
487 u32 hweight_inuse;
488 bool has_surplus;
490 struct wait_queue_head waitq;
491 struct hrtimer waitq_timer;
492 struct hrtimer delay_timer;
494 /* usage is recorded as fractions of HWEIGHT_WHOLE */
495 int usage_idx;
496 u32 usages[NR_USAGE_SLOTS];
498 /* this iocg's depth in the hierarchy and ancestors including self */
499 int level;
500 struct ioc_gq *ancestors[];
503 /* per cgroup */
504 struct ioc_cgrp {
505 struct blkcg_policy_data cpd;
506 unsigned int dfl_weight;
509 struct ioc_now {
510 u64 now_ns;
511 u32 now;
512 u64 vnow;
513 u64 vrate;
516 struct iocg_wait {
517 struct wait_queue_entry wait;
518 struct bio *bio;
519 u64 abs_cost;
520 bool committed;
523 struct iocg_wake_ctx {
524 struct ioc_gq *iocg;
525 u32 hw_inuse;
526 s64 vbudget;
529 static const struct ioc_params autop[] = {
530 [AUTOP_HDD] = {
531 .qos = {
532 [QOS_RLAT] = 250000, /* 250ms */
533 [QOS_WLAT] = 250000,
534 [QOS_MIN] = VRATE_MIN_PPM,
535 [QOS_MAX] = VRATE_MAX_PPM,
537 .i_lcoefs = {
538 [I_LCOEF_RBPS] = 174019176,
539 [I_LCOEF_RSEQIOPS] = 41708,
540 [I_LCOEF_RRANDIOPS] = 370,
541 [I_LCOEF_WBPS] = 178075866,
542 [I_LCOEF_WSEQIOPS] = 42705,
543 [I_LCOEF_WRANDIOPS] = 378,
546 [AUTOP_SSD_QD1] = {
547 .qos = {
548 [QOS_RLAT] = 25000, /* 25ms */
549 [QOS_WLAT] = 25000,
550 [QOS_MIN] = VRATE_MIN_PPM,
551 [QOS_MAX] = VRATE_MAX_PPM,
553 .i_lcoefs = {
554 [I_LCOEF_RBPS] = 245855193,
555 [I_LCOEF_RSEQIOPS] = 61575,
556 [I_LCOEF_RRANDIOPS] = 6946,
557 [I_LCOEF_WBPS] = 141365009,
558 [I_LCOEF_WSEQIOPS] = 33716,
559 [I_LCOEF_WRANDIOPS] = 26796,
562 [AUTOP_SSD_DFL] = {
563 .qos = {
564 [QOS_RLAT] = 25000, /* 25ms */
565 [QOS_WLAT] = 25000,
566 [QOS_MIN] = VRATE_MIN_PPM,
567 [QOS_MAX] = VRATE_MAX_PPM,
569 .i_lcoefs = {
570 [I_LCOEF_RBPS] = 488636629,
571 [I_LCOEF_RSEQIOPS] = 8932,
572 [I_LCOEF_RRANDIOPS] = 8518,
573 [I_LCOEF_WBPS] = 427891549,
574 [I_LCOEF_WSEQIOPS] = 28755,
575 [I_LCOEF_WRANDIOPS] = 21940,
577 .too_fast_vrate_pct = 500,
579 [AUTOP_SSD_FAST] = {
580 .qos = {
581 [QOS_RLAT] = 5000, /* 5ms */
582 [QOS_WLAT] = 5000,
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
586 .i_lcoefs = {
587 [I_LCOEF_RBPS] = 3102524156LLU,
588 [I_LCOEF_RSEQIOPS] = 724816,
589 [I_LCOEF_RRANDIOPS] = 778122,
590 [I_LCOEF_WBPS] = 1742780862LLU,
591 [I_LCOEF_WSEQIOPS] = 425702,
592 [I_LCOEF_WRANDIOPS] = 443193,
594 .too_slow_vrate_pct = 10,
/*
 * vrate adjust percentages indexed by ioc->busy_level.  We adjust up on
 * vtime credit shortage and down on device saturation.
 */
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
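
/*
 * Reading the table: busy_level is clamped to the array range, so e.g. a
 * busy_level of +6 picks vrate_adj_pct[6] == 1 and the period timer nudges
 * vrate down to 99% of its current value, while a busy_level of -45 picks
 * 8 and nudges it up to 108%.  The zero entries at the front mean that
 * busy_level magnitudes below 4 cause no adjustment at all.
 */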
608 static struct blkcg_policy blkcg_policy_iocost;
610 /* accessors and helpers */
611 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
613 return container_of(rqos, struct ioc, rqos);
616 static struct ioc *q_to_ioc(struct request_queue *q)
618 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
621 static const char *q_name(struct request_queue *q)
623 if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
624 return kobject_name(q->kobj.parent);
625 else
626 return "<unknown>";
629 static const char __maybe_unused *ioc_name(struct ioc *ioc)
631 return q_name(ioc->rqos.q);
634 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
636 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
639 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
641 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
644 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
646 return pd_to_blkg(&iocg->pd);
649 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
651 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
652 struct ioc_cgrp, cpd);
/*
 * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
 * weight, the more expensive each IO.  Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
}

/*
 * The inverse of abs_cost_to_cost().  Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE);
}

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
{
	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);
}
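
/*
 * A quick worked example of the scaling above: with hw_inuse at 25%
 * (HWEIGHT_WHOLE / 4), an IO whose absolute cost is 100us worth of device
 * vtime is charged as 400us against the cgroup's vtime, i.e. a cgroup
 * entitled to a quarter of the device burns through its budget four times
 * as fast.
 */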
678 #define CREATE_TRACE_POINTS
679 #include <trace/events/iocost.h>
/* latency QoS params changed, update period_us and all the dependent params */
682 static void ioc_refresh_period_us(struct ioc *ioc)
684 u32 ppm, lat, multi, period_us;
686 lockdep_assert_held(&ioc->lock);
688 /* pick the higher latency target */
689 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
690 ppm = ioc->params.qos[QOS_RPPM];
691 lat = ioc->params.qos[QOS_RLAT];
692 } else {
693 ppm = ioc->params.qos[QOS_WPPM];
694 lat = ioc->params.qos[QOS_WLAT];
698 * We want the period to be long enough to contain a healthy number
699 * of IOs while short enough for granular control. Define it as a
700 * multiple of the latency target. Ideally, the multiplier should
701 * be scaled according to the percentile so that it would nominally
702 * contain a certain number of requests. Let's be simpler and
703 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
705 if (ppm)
706 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
707 else
708 multi = 2;
709 period_us = multi * lat;
710 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
712 /* calculate dependent params */
713 ioc->period_us = period_us;
714 ioc->margin_us = period_us * MARGIN_PCT / 100;
715 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
716 period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
719 static int ioc_autop_idx(struct ioc *ioc)
721 int idx = ioc->autop_idx;
722 const struct ioc_params *p = &autop[idx];
723 u32 vrate_pct;
724 u64 now_ns;
726 /* rotational? */
727 if (!blk_queue_nonrot(ioc->rqos.q))
728 return AUTOP_HDD;
730 /* handle SATA SSDs w/ broken NCQ */
731 if (blk_queue_depth(ioc->rqos.q) == 1)
732 return AUTOP_SSD_QD1;
734 /* use one of the normal ssd sets */
735 if (idx < AUTOP_SSD_DFL)
736 return AUTOP_SSD_DFL;
738 /* if user is overriding anything, maintain what was there */
739 if (ioc->user_qos_params || ioc->user_cost_model)
740 return idx;
742 /* step up/down based on the vrate */
743 vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
744 VTIME_PER_USEC);
745 now_ns = ktime_get_ns();
747 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
748 if (!ioc->autop_too_fast_at)
749 ioc->autop_too_fast_at = now_ns;
750 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
751 return idx + 1;
752 } else {
753 ioc->autop_too_fast_at = 0;
756 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
757 if (!ioc->autop_too_slow_at)
758 ioc->autop_too_slow_at = now_ns;
759 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
760 return idx - 1;
761 } else {
762 ioc->autop_too_slow_at = 0;
765 return idx;
/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
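
/*
 * For a rough feel of the numbers (a purely hypothetical device): with
 * @bps = 400 MiB/s and @randiops = 10000, the per-page cost comes out to
 * 1s / (400 MiB/s / 4096) ~= 9.8us of device time and the random base cost
 * to 1s / 10000 - 9.8us ~= 90us, so a single random 16k read is charged
 * roughly 90 + 4 * 9.8 ~= 129us of device vtime before hweight scaling.
 */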
781 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
782 u64 *page, u64 *seqio, u64 *randio)
784 u64 v;
786 *page = *seqio = *randio = 0;
788 if (bps)
789 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
790 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
792 if (seqiops) {
793 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
794 if (v > *page)
795 *seqio = v - *page;
798 if (randiops) {
799 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
800 if (v > *page)
801 *randio = v - *page;
805 static void ioc_refresh_lcoefs(struct ioc *ioc)
807 u64 *u = ioc->params.i_lcoefs;
808 u64 *c = ioc->params.lcoefs;
810 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
811 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
812 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
813 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
816 static bool ioc_refresh_params(struct ioc *ioc, bool force)
818 const struct ioc_params *p;
819 int idx;
821 lockdep_assert_held(&ioc->lock);
823 idx = ioc_autop_idx(ioc);
824 p = &autop[idx];
826 if (idx == ioc->autop_idx && !force)
827 return false;
829 if (idx != ioc->autop_idx)
830 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
832 ioc->autop_idx = idx;
833 ioc->autop_too_fast_at = 0;
834 ioc->autop_too_slow_at = 0;
836 if (!ioc->user_qos_params)
837 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
838 if (!ioc->user_cost_model)
839 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
841 ioc_refresh_period_us(ioc);
842 ioc_refresh_lcoefs(ioc);
844 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
845 VTIME_PER_USEC, MILLION);
846 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
847 VTIME_PER_USEC, MILLION);
849 return true;
852 /* take a snapshot of the current [v]time and vrate */
853 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
855 unsigned seq;
857 now->now_ns = ktime_get();
858 now->now = ktime_to_us(now->now_ns);
859 now->vrate = atomic64_read(&ioc->vtime_rate);
862 * The current vtime is
864 * vtime at period start + (wallclock time since the start) * vrate
866 * As a consistent snapshot of `period_at_vtime` and `period_at` is
867 * needed, they're seqcount protected.
869 do {
870 seq = read_seqcount_begin(&ioc->period_seqcount);
871 now->vnow = ioc->period_at_vtime +
872 (now->now - ioc->period_at) * now->vrate;
873 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
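
/*
 * For example, 500us into a period with vrate at half speed
 * (VTIME_PER_USEC / 2), vnow has advanced 250us worth of device vtime
 * past period_at_vtime.
 */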
876 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
878 lockdep_assert_held(&ioc->lock);
879 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
881 write_seqcount_begin(&ioc->period_seqcount);
882 ioc->period_at = now->now;
883 ioc->period_at_vtime = now->vnow;
884 write_seqcount_end(&ioc->period_seqcount);
886 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
887 add_timer(&ioc->timer);
891 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
892 * weight sums and propagate upwards accordingly.
894 static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
896 struct ioc *ioc = iocg->ioc;
897 int lvl;
899 lockdep_assert_held(&ioc->lock);
901 inuse = min(active, inuse);
903 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
904 struct ioc_gq *parent = iocg->ancestors[lvl];
905 struct ioc_gq *child = iocg->ancestors[lvl + 1];
906 u32 parent_active = 0, parent_inuse = 0;
908 /* update the level sums */
909 parent->child_active_sum += (s32)(active - child->active);
910 parent->child_inuse_sum += (s32)(inuse - child->inuse);
		/* apply the updates */
912 child->active = active;
913 child->inuse = inuse;
916 * The delta between inuse and active sums indicates that
917 * that much of weight is being given away. Parent's inuse
918 * and active should reflect the ratio.
920 if (parent->child_active_sum) {
921 parent_active = parent->weight;
922 parent_inuse = DIV64_U64_ROUND_UP(
923 parent_active * parent->child_inuse_sum,
924 parent->child_active_sum);
927 /* do we need to keep walking up? */
928 if (parent_active == parent->active &&
929 parent_inuse == parent->inuse)
930 break;
932 active = parent_active;
933 inuse = parent_inuse;
936 ioc->weights_updated = true;
939 static void commit_active_weights(struct ioc *ioc)
941 lockdep_assert_held(&ioc->lock);
943 if (ioc->weights_updated) {
944 /* paired with rmb in current_hweight(), see there */
945 smp_wmb();
946 atomic_inc(&ioc->hweight_gen);
947 ioc->weights_updated = false;
951 static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
953 __propagate_active_weight(iocg, active, inuse);
954 commit_active_weights(iocg->ioc);
957 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
959 struct ioc *ioc = iocg->ioc;
960 int lvl;
961 u32 hwa, hwi;
962 int ioc_gen;
964 /* hot path - if uptodate, use cached */
965 ioc_gen = atomic_read(&ioc->hweight_gen);
966 if (ioc_gen == iocg->hweight_gen)
967 goto out;
970 * Paired with wmb in commit_active_weights(). If we saw the
971 * updated hweight_gen, all the weight updates from
972 * __propagate_active_weight() are visible too.
974 * We can race with weight updates during calculation and get it
975 * wrong. However, hweight_gen would have changed and a future
976 * reader will recalculate and we're guaranteed to discard the
977 * wrong result soon.
979 smp_rmb();
981 hwa = hwi = HWEIGHT_WHOLE;
982 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
983 struct ioc_gq *parent = iocg->ancestors[lvl];
984 struct ioc_gq *child = iocg->ancestors[lvl + 1];
985 u32 active_sum = READ_ONCE(parent->child_active_sum);
986 u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
987 u32 active = READ_ONCE(child->active);
988 u32 inuse = READ_ONCE(child->inuse);
990 /* we can race with deactivations and either may read as zero */
991 if (!active_sum || !inuse_sum)
992 continue;
994 active_sum = max(active, active_sum);
995 hwa = hwa * active / active_sum; /* max 16bits * 10000 */
997 inuse_sum = max(inuse, inuse_sum);
998 hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */
1001 iocg->hweight_active = max_t(u32, hwa, 1);
1002 iocg->hweight_inuse = max_t(u32, hwi, 1);
1003 iocg->hweight_gen = ioc_gen;
1004 out:
1005 if (hw_activep)
1006 *hw_activep = iocg->hweight_active;
1007 if (hw_inusep)
1008 *hw_inusep = iocg->hweight_inuse;
1011 static void weight_updated(struct ioc_gq *iocg)
1013 struct ioc *ioc = iocg->ioc;
1014 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1015 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1016 u32 weight;
1018 lockdep_assert_held(&ioc->lock);
1020 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1021 if (weight != iocg->weight && iocg->active)
1022 propagate_active_weight(iocg, weight,
1023 DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
1024 iocg->weight = weight;
1027 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1029 struct ioc *ioc = iocg->ioc;
1030 u64 last_period, cur_period, max_period_delta;
1031 u64 vtime, vmargin, vmin;
1032 int i;
	/*
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active.  We don't mind occasional races.
	 */
1038 if (!list_empty(&iocg->active_list)) {
1039 ioc_now(ioc, now);
1040 cur_period = atomic64_read(&ioc->cur_period);
1041 if (atomic64_read(&iocg->active_period) != cur_period)
1042 atomic64_set(&iocg->active_period, cur_period);
1043 return true;
1046 /* racy check on internal node IOs, treat as root level IOs */
1047 if (iocg->child_active_sum)
1048 return false;
1050 spin_lock_irq(&ioc->lock);
1052 ioc_now(ioc, now);
1054 /* update period */
1055 cur_period = atomic64_read(&ioc->cur_period);
1056 last_period = atomic64_read(&iocg->active_period);
1057 atomic64_set(&iocg->active_period, cur_period);
1059 /* already activated or breaking leaf-only constraint? */
1060 if (!list_empty(&iocg->active_list))
1061 goto succeed_unlock;
1062 for (i = iocg->level - 1; i > 0; i--)
1063 if (!list_empty(&iocg->ancestors[i]->active_list))
1064 goto fail_unlock;
1066 if (iocg->child_active_sum)
1067 goto fail_unlock;
1070 * vtime may wrap when vrate is raised substantially due to
1071 * underestimated IO costs. Look at the period and ignore its
1072 * vtime if the iocg has been idle for too long. Also, cap the
1073 * budget it can start with to the margin.
1075 max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
1076 vtime = atomic64_read(&iocg->vtime);
1077 vmargin = ioc->margin_us * now->vrate;
1078 vmin = now->vnow - vmargin;
1080 if (last_period + max_period_delta < cur_period ||
1081 time_before64(vtime, vmin)) {
1082 atomic64_add(vmin - vtime, &iocg->vtime);
1083 atomic64_add(vmin - vtime, &iocg->done_vtime);
1084 vtime = vmin;
1088 * Activate, propagate weight and start period timer if not
1089 * running. Reset hweight_gen to avoid accidental match from
1090 * wrapping.
1092 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1093 list_add(&iocg->active_list, &ioc->active_iocgs);
1094 propagate_active_weight(iocg, iocg->weight,
1095 iocg->last_inuse ?: iocg->weight);
1097 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1098 last_period, cur_period, vtime);
1100 iocg->last_vtime = vtime;
1102 if (ioc->running == IOC_IDLE) {
1103 ioc->running = IOC_RUNNING;
1104 ioc_start_period(ioc, now);
1107 succeed_unlock:
1108 spin_unlock_irq(&ioc->lock);
1109 return true;
1111 fail_unlock:
1112 spin_unlock_irq(&ioc->lock);
1113 return false;
1116 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1117 int flags, void *key)
1119 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1120 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1121 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1123 ctx->vbudget -= cost;
1125 if (ctx->vbudget < 0)
1126 return -1;
1128 iocg_commit_bio(ctx->iocg, wait->bio, cost);
1131 * autoremove_wake_function() removes the wait entry only when it
1132 * actually changed the task state. We want the wait always
1133 * removed. Remove explicitly and use default_wake_function().
1135 list_del_init(&wq_entry->entry);
1136 wait->committed = true;
1138 default_wake_function(wq_entry, mode, flags, key);
1139 return 0;
1142 static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
1144 struct ioc *ioc = iocg->ioc;
1145 struct iocg_wake_ctx ctx = { .iocg = iocg };
1146 u64 margin_ns = (u64)(ioc->period_us *
1147 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
1148 u64 vdebt, vshortage, expires, oexpires;
1149 s64 vbudget;
1150 u32 hw_inuse;
1152 lockdep_assert_held(&iocg->waitq.lock);
1154 current_hweight(iocg, NULL, &hw_inuse);
1155 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1157 /* pay off debt */
1158 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1159 if (vdebt && vbudget > 0) {
1160 u64 delta = min_t(u64, vbudget, vdebt);
1161 u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
1162 iocg->abs_vdebt);
1164 atomic64_add(delta, &iocg->vtime);
1165 atomic64_add(delta, &iocg->done_vtime);
1166 iocg->abs_vdebt -= abs_delta;
1170 * Wake up the ones which are due and see how much vtime we'll need
1171 * for the next one.
1173 ctx.hw_inuse = hw_inuse;
1174 ctx.vbudget = vbudget - vdebt;
1175 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1176 if (!waitqueue_active(&iocg->waitq))
1177 return;
1178 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1179 return;
1181 /* determine next wakeup, add a quarter margin to guarantee chunking */
1182 vshortage = -ctx.vbudget;
1183 expires = now->now_ns +
1184 DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
1185 expires += margin_ns / 4;
1187 /* if already active and close enough, don't bother */
1188 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1189 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1190 abs(oexpires - expires) <= margin_ns / 4)
1191 return;
1193 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1194 margin_ns / 4, HRTIMER_MODE_ABS);
1197 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1199 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1200 struct ioc_now now;
1201 unsigned long flags;
1203 ioc_now(iocg->ioc, &now);
1205 spin_lock_irqsave(&iocg->waitq.lock, flags);
1206 iocg_kick_waitq(iocg, &now);
1207 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1209 return HRTIMER_NORESTART;
1212 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
1214 struct ioc *ioc = iocg->ioc;
1215 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1216 u64 vtime = atomic64_read(&iocg->vtime);
1217 u64 vmargin = ioc->margin_us * now->vrate;
1218 u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
1219 u64 expires, oexpires;
1220 u32 hw_inuse;
1222 lockdep_assert_held(&iocg->waitq.lock);
1224 /* debt-adjust vtime */
1225 current_hweight(iocg, NULL, &hw_inuse);
1226 vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
1229 * Clear or maintain depending on the overage. Non-zero vdebt is what
1230 * guarantees that @iocg is online and future iocg_kick_delay() will
1231 * clear use_delay. Don't leave it on when there's no vdebt.
1233 if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
1234 blkcg_clear_delay(blkg);
1235 return false;
1237 if (!atomic_read(&blkg->use_delay) &&
1238 time_before_eq64(vtime, now->vnow + vmargin))
1239 return false;
1241 /* use delay */
1242 if (cost) {
1243 u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
1244 now->vrate);
1245 blkcg_add_delay(blkg, now->now_ns, cost_ns);
1247 blkcg_use_delay(blkg);
1249 expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
1250 now->vrate) * NSEC_PER_USEC;
1252 /* if already active and close enough, don't bother */
1253 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
1254 if (hrtimer_is_queued(&iocg->delay_timer) &&
1255 abs(oexpires - expires) <= margin_ns / 4)
1256 return true;
1258 hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
1259 margin_ns / 4, HRTIMER_MODE_ABS);
1260 return true;
1263 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
1265 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
1266 struct ioc_now now;
1267 unsigned long flags;
1269 spin_lock_irqsave(&iocg->waitq.lock, flags);
1270 ioc_now(iocg->ioc, &now);
1271 iocg_kick_delay(iocg, &now, 0);
1272 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1274 return HRTIMER_NORESTART;
1277 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1279 u32 nr_met[2] = { };
1280 u32 nr_missed[2] = { };
1281 u64 rq_wait_ns = 0;
1282 int cpu, rw;
1284 for_each_online_cpu(cpu) {
1285 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1286 u64 this_rq_wait_ns;
1288 for (rw = READ; rw <= WRITE; rw++) {
1289 u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
1290 u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed);
1292 nr_met[rw] += this_met - stat->missed[rw].last_met;
1293 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1294 stat->missed[rw].last_met = this_met;
1295 stat->missed[rw].last_missed = this_missed;
1298 this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
1299 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1300 stat->last_rq_wait_ns = this_rq_wait_ns;
1303 for (rw = READ; rw <= WRITE; rw++) {
1304 if (nr_met[rw] + nr_missed[rw])
1305 missed_ppm_ar[rw] =
1306 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1307 nr_met[rw] + nr_missed[rw]);
1308 else
1309 missed_ppm_ar[rw] = 0;
1312 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1313 ioc->period_us * NSEC_PER_USEC);
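
/*
 * Example of the stats above: if a period saw 90 reads meet the read
 * latency target and 10 miss it, missed_ppm_ar[READ] becomes
 * 10 * MILLION / 100 = 100000, i.e. 10% expressed in parts per million,
 * which ioc_timer_fn() compares against the threshold derived from the
 * io.cost.qos rpct setting.
 */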
1316 /* was iocg idle this period? */
1317 static bool iocg_is_idle(struct ioc_gq *iocg)
1319 struct ioc *ioc = iocg->ioc;
1321 /* did something get issued this period? */
1322 if (atomic64_read(&iocg->active_period) ==
1323 atomic64_read(&ioc->cur_period))
1324 return false;
1326 /* is something in flight? */
1327 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1328 return false;
1330 return true;
/* returns usage with margin added if surplus is large enough */
static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
{
	/* add margin */
	usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
	usage += SURPLUS_SCALE_ABS;

	/* don't bother if the surplus is too small */
	if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
		return 0;

	return usage;
}
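
/*
 * Worked example: a cgroup which used 20% of the device (usage ==
 * HWEIGHT_WHOLE / 5) while holding an hweight_inuse of 50% gets a
 * margin-adjusted usage of 20% * 1.25 + 2% = 27%.  As 27% + 3% is still
 * below 50%, the function returns 27% and the period timer may shrink the
 * cgroup's inuse weight toward it, donating the difference.
 */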
1347 static void ioc_timer_fn(struct timer_list *timer)
1349 struct ioc *ioc = container_of(timer, struct ioc, timer);
1350 struct ioc_gq *iocg, *tiocg;
1351 struct ioc_now now;
1352 int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
1353 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1354 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1355 u32 missed_ppm[2], rq_wait_pct;
1356 u64 period_vtime;
1357 int prev_busy_level, i;
1359 /* how were the latencies during the period? */
1360 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1362 /* take care of active iocgs */
1363 spin_lock_irq(&ioc->lock);
1365 ioc_now(ioc, &now);
1367 period_vtime = now.vnow - ioc->period_at_vtime;
1368 if (WARN_ON_ONCE(!period_vtime)) {
1369 spin_unlock_irq(&ioc->lock);
1370 return;
1374 * Waiters determine the sleep durations based on the vrate they
1375 * saw at the time of sleep. If vrate has increased, some waiters
1376 * could be sleeping for too long. Wake up tardy waiters which
1377 * should have woken up in the last period and expire idle iocgs.
1379 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
1380 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1381 !iocg_is_idle(iocg))
1382 continue;
1384 spin_lock(&iocg->waitq.lock);
1386 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
1387 /* might be oversleeping vtime / hweight changes, kick */
1388 iocg_kick_waitq(iocg, &now);
1389 iocg_kick_delay(iocg, &now, 0);
1390 } else if (iocg_is_idle(iocg)) {
1391 /* no waiter and idle, deactivate */
1392 iocg->last_inuse = iocg->inuse;
1393 __propagate_active_weight(iocg, 0, 0);
1394 list_del_init(&iocg->active_list);
1397 spin_unlock(&iocg->waitq.lock);
1399 commit_active_weights(ioc);
1401 /* calc usages and see whether some weights need to be moved around */
1402 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1403 u64 vdone, vtime, vusage, vmargin, vmin;
1404 u32 hw_active, hw_inuse, usage;
1407 * Collect unused and wind vtime closer to vnow to prevent
1408 * iocgs from accumulating a large amount of budget.
1410 vdone = atomic64_read(&iocg->done_vtime);
1411 vtime = atomic64_read(&iocg->vtime);
1412 current_hweight(iocg, &hw_active, &hw_inuse);
1415 * Latency QoS detection doesn't account for IOs which are
1416 * in-flight for longer than a period. Detect them by
1417 * comparing vdone against period start. If lagging behind
1418 * IOs from past periods, don't increase vrate.
1420 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
1421 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
1422 time_after64(vtime, vdone) &&
1423 time_after64(vtime, now.vnow -
1424 MAX_LAGGING_PERIODS * period_vtime) &&
1425 time_before64(vdone, now.vnow - period_vtime))
1426 nr_lagging++;
1428 if (waitqueue_active(&iocg->waitq))
1429 vusage = now.vnow - iocg->last_vtime;
1430 else if (time_before64(iocg->last_vtime, vtime))
1431 vusage = vtime - iocg->last_vtime;
1432 else
1433 vusage = 0;
1435 iocg->last_vtime += vusage;
		/*
		 * Factor in in-flight vtime into vusage to avoid
		 * high-latency completions appearing as idle.  This should
		 * be done after the above ->last_vtime adjustment.
		 */
1441 vusage = max(vusage, vtime - vdone);
1443 /* calculate hweight based usage ratio and record */
1444 if (vusage) {
1445 usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
1446 period_vtime);
1447 iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
1448 iocg->usages[iocg->usage_idx] = usage;
1449 } else {
1450 usage = 0;
1453 /* see whether there's surplus vtime */
1454 vmargin = ioc->margin_us * now.vrate;
1455 vmin = now.vnow - vmargin;
1457 iocg->has_surplus = false;
1459 if (!waitqueue_active(&iocg->waitq) &&
1460 time_before64(vtime, vmin)) {
1461 u64 delta = vmin - vtime;
1463 /* throw away surplus vtime */
1464 atomic64_add(delta, &iocg->vtime);
1465 atomic64_add(delta, &iocg->done_vtime);
1466 iocg->last_vtime += delta;
1467 /* if usage is sufficiently low, maybe it can donate */
1468 if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
1469 iocg->has_surplus = true;
1470 nr_surpluses++;
1472 } else if (hw_inuse < hw_active) {
1473 u32 new_hwi, new_inuse;
1475 /* was donating but might need to take back some */
1476 if (waitqueue_active(&iocg->waitq)) {
1477 new_hwi = hw_active;
1478 } else {
1479 new_hwi = max(hw_inuse,
1480 usage * SURPLUS_SCALE_PCT / 100 +
1481 SURPLUS_SCALE_ABS);
1484 new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
1485 hw_inuse);
1486 new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);
1488 if (new_inuse > iocg->inuse) {
1489 TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
1490 iocg->inuse, new_inuse,
1491 hw_inuse, new_hwi);
1492 __propagate_active_weight(iocg, iocg->weight,
1493 new_inuse);
1495 } else {
			/* genuinely out of vtime */
1497 nr_shortages++;
1501 if (!nr_shortages || !nr_surpluses)
1502 goto skip_surplus_transfers;
1504 /* there are both shortages and surpluses, transfer surpluses */
1505 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1506 u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
1507 int nr_valid = 0;
1509 if (!iocg->has_surplus)
1510 continue;
1512 /* base the decision on max historical usage */
1513 for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
1514 if (iocg->usages[i]) {
1515 usage = max(usage, iocg->usages[i]);
1516 nr_valid++;
1519 if (nr_valid < MIN_VALID_USAGES)
1520 continue;
1522 current_hweight(iocg, &hw_active, &hw_inuse);
1523 new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
1524 if (!new_hwi)
1525 continue;
1527 new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
1528 hw_inuse);
1529 if (new_inuse < iocg->inuse) {
1530 TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
1531 iocg->inuse, new_inuse,
1532 hw_inuse, new_hwi);
1533 __propagate_active_weight(iocg, iocg->weight, new_inuse);
1536 skip_surplus_transfers:
1537 commit_active_weights(ioc);
1540 * If q is getting clogged or we're missing too much, we're issuing
1541 * too much IO and should lower vtime rate. If we're not missing
1542 * and experiencing shortages but not surpluses, we're too stingy
1543 * and should increase vtime rate.
1545 prev_busy_level = ioc->busy_level;
1546 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
1547 missed_ppm[READ] > ppm_rthr ||
1548 missed_ppm[WRITE] > ppm_wthr) {
1549 /* clearly missing QoS targets, slow down vrate */
1550 ioc->busy_level = max(ioc->busy_level, 0);
1551 ioc->busy_level++;
1552 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
1553 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
1554 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
1555 /* QoS targets are being met with >25% margin */
1556 if (nr_shortages) {
1558 * We're throttling while the device has spare
1559 * capacity. If vrate was being slowed down, stop.
1561 ioc->busy_level = min(ioc->busy_level, 0);
1564 * If there are IOs spanning multiple periods, wait
1565 * them out before pushing the device harder. If
1566 * there are surpluses, let redistribution work it
1567 * out first.
1569 if (!nr_lagging && !nr_surpluses)
1570 ioc->busy_level--;
1571 } else {
1573 * Nobody is being throttled and the users aren't
1574 * issuing enough IOs to saturate the device. We
1575 * simply don't know how close the device is to
1576 * saturation. Coast.
1578 ioc->busy_level = 0;
1580 } else {
		/* inside the hysteresis margin, we're good */
1582 ioc->busy_level = 0;
1585 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
1587 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
1588 u64 vrate = atomic64_read(&ioc->vtime_rate);
1589 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
1591 /* rq_wait signal is always reliable, ignore user vrate_min */
1592 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
1593 vrate_min = VRATE_MIN;
1596 * If vrate is out of bounds, apply clamp gradually as the
1597 * bounds can change abruptly. Otherwise, apply busy_level
1598 * based adjustment.
1600 if (vrate < vrate_min) {
1601 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
1602 100);
1603 vrate = min(vrate, vrate_min);
1604 } else if (vrate > vrate_max) {
1605 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
1606 100);
1607 vrate = max(vrate, vrate_max);
1608 } else {
1609 int idx = min_t(int, abs(ioc->busy_level),
1610 ARRAY_SIZE(vrate_adj_pct) - 1);
1611 u32 adj_pct = vrate_adj_pct[idx];
1613 if (ioc->busy_level > 0)
1614 adj_pct = 100 - adj_pct;
1615 else
1616 adj_pct = 100 + adj_pct;
1618 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1619 vrate_min, vrate_max);
1622 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1623 nr_lagging, nr_shortages,
1624 nr_surpluses);
1626 atomic64_set(&ioc->vtime_rate, vrate);
1627 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
1628 ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
1629 } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
1630 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
1631 missed_ppm, rq_wait_pct, nr_lagging,
1632 nr_shortages, nr_surpluses);
1635 ioc_refresh_params(ioc, false);
1638 * This period is done. Move onto the next one. If nothing's
1639 * going on with the device, stop the timer.
1641 atomic64_inc(&ioc->cur_period);
1643 if (ioc->running != IOC_STOP) {
1644 if (!list_empty(&ioc->active_iocgs)) {
1645 ioc_start_period(ioc, &now);
1646 } else {
1647 ioc->busy_level = 0;
1648 ioc->running = IOC_IDLE;
1652 spin_unlock_irq(&ioc->lock);
1655 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
1656 bool is_merge, u64 *costp)
1658 struct ioc *ioc = iocg->ioc;
1659 u64 coef_seqio, coef_randio, coef_page;
1660 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
1661 u64 seek_pages = 0;
1662 u64 cost = 0;
1664 switch (bio_op(bio)) {
1665 case REQ_OP_READ:
1666 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
1667 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
1668 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
1669 break;
1670 case REQ_OP_WRITE:
1671 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
1672 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
1673 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
1674 break;
1675 default:
1676 goto out;
1679 if (iocg->cursor) {
1680 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
1681 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
1684 if (!is_merge) {
1685 if (seek_pages > LCOEF_RANDIO_PAGES) {
1686 cost += coef_randio;
1687 } else {
1688 cost += coef_seqio;
1691 cost += pages * coef_page;
1692 out:
1693 *costp = cost;
1696 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
1698 u64 cost;
1700 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
1701 return cost;
1704 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
1706 struct blkcg_gq *blkg = bio->bi_blkg;
1707 struct ioc *ioc = rqos_to_ioc(rqos);
1708 struct ioc_gq *iocg = blkg_to_iocg(blkg);
1709 struct ioc_now now;
1710 struct iocg_wait wait;
1711 u32 hw_active, hw_inuse;
1712 u64 abs_cost, cost, vtime;
1714 /* bypass IOs if disabled or for root cgroup */
1715 if (!ioc->enabled || !iocg->level)
1716 return;
1718 /* always activate so that even 0 cost IOs get protected to some level */
1719 if (!iocg_activate(iocg, &now))
1720 return;
1722 /* calculate the absolute vtime cost */
1723 abs_cost = calc_vtime_cost(bio, iocg, false);
1724 if (!abs_cost)
1725 return;
1727 iocg->cursor = bio_end_sector(bio);
1729 vtime = atomic64_read(&iocg->vtime);
1730 current_hweight(iocg, &hw_active, &hw_inuse);
1732 if (hw_inuse < hw_active &&
1733 time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
1734 TRACE_IOCG_PATH(inuse_reset, iocg, &now,
1735 iocg->inuse, iocg->weight, hw_inuse, hw_active);
1736 spin_lock_irq(&ioc->lock);
1737 propagate_active_weight(iocg, iocg->weight, iocg->weight);
1738 spin_unlock_irq(&ioc->lock);
1739 current_hweight(iocg, &hw_active, &hw_inuse);
1742 cost = abs_cost_to_cost(abs_cost, hw_inuse);
1745 * If no one's waiting and within budget, issue right away. The
1746 * tests are racy but the races aren't systemic - we only miss once
1747 * in a while which is fine.
1749 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
1750 time_before_eq64(vtime + cost, now.vnow)) {
1751 iocg_commit_bio(iocg, bio, cost);
1752 return;
	/*
	 * We activated above but w/o any synchronization.  Deactivation is
	 * synchronized with waitq.lock and we won't get deactivated as long
	 * as we're waiting or have debt, so we're good if we're activated
	 * here.  In the unlikely case that we aren't, just issue the IO.
	 */
1761 spin_lock_irq(&iocg->waitq.lock);
1763 if (unlikely(list_empty(&iocg->active_list))) {
1764 spin_unlock_irq(&iocg->waitq.lock);
1765 iocg_commit_bio(iocg, bio, cost);
1766 return;
1770 * We're over budget. If @bio has to be issued regardless, remember
1771 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
1772 * off the debt before waking more IOs.
1774 * This way, the debt is continuously paid off each period with the
1775 * actual budget available to the cgroup. If we just wound vtime, we
1776 * would incorrectly use the current hw_inuse for the entire amount
1777 * which, for example, can lead to the cgroup staying blocked for a
1778 * long time even with substantially raised hw_inuse.
1780 * An iocg with vdebt should stay online so that the timer can keep
1781 * deducting its vdebt and [de]activate use_delay mechanism
1782 * accordingly. We don't want to race against the timer trying to
1783 * clear them and leave @iocg inactive w/ dangling use_delay heavily
1784 * penalizing the cgroup and its descendants.
1786 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
1787 iocg->abs_vdebt += abs_cost;
1788 if (iocg_kick_delay(iocg, &now, cost))
1789 blkcg_schedule_throttle(rqos->q,
1790 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
1791 spin_unlock_irq(&iocg->waitq.lock);
1792 return;
1796 * Append self to the waitq and schedule the wakeup timer if we're
1797 * the first waiter. The timer duration is calculated based on the
1798 * current vrate. vtime and hweight changes can make it too short
1799 * or too long. Each wait entry records the absolute cost it's
1800 * waiting for to allow re-evaluation using a custom wait entry.
1802 * If too short, the timer simply reschedules itself. If too long,
1803 * the period timer will notice and trigger wakeups.
1805 * All waiters are on iocg->waitq and the wait states are
1806 * synchronized using waitq.lock.
1808 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
1809 wait.wait.private = current;
1810 wait.bio = bio;
1811 wait.abs_cost = abs_cost;
1812 wait.committed = false; /* will be set true by waker */
1814 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
1815 iocg_kick_waitq(iocg, &now);
1817 spin_unlock_irq(&iocg->waitq.lock);
1819 while (true) {
1820 set_current_state(TASK_UNINTERRUPTIBLE);
1821 if (wait.committed)
1822 break;
1823 io_schedule();
1826 /* waker already committed us, proceed */
1827 finish_wait(&iocg->waitq, &wait.wait);
static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
			   struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
	struct ioc *ioc = iocg->ioc;
	sector_t bio_end = bio_end_sector(bio);
	struct ioc_now now;
	u32 hw_inuse;
	u64 abs_cost, cost;
	unsigned long flags;

	/* bypass if disabled or for root cgroup */
	if (!ioc->enabled || !iocg->level)
		return;

	abs_cost = calc_vtime_cost(bio, iocg, true);
	if (!abs_cost)
		return;

	ioc_now(ioc, &now);
	current_hweight(iocg, NULL, &hw_inuse);
	cost = abs_cost_to_cost(abs_cost, hw_inuse);

	/* update cursor if backmerging into the request at the cursor */
	if (blk_rq_pos(rq) < bio_end &&
	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
		iocg->cursor = bio_end;

	/*
	 * Charge if there's enough vtime budget and the existing request has
	 * cost assigned.
	 */
	if (rq->bio && rq->bio->bi_iocost_cost &&
	    time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
		iocg_commit_bio(iocg, bio, cost);
		return;
	}

	/*
	 * Otherwise, account it as debt if @iocg is online, which it should
	 * be for the vast majority of cases. See debt handling in
	 * ioc_rqos_throttle() for details.
	 */
	spin_lock_irqsave(&iocg->waitq.lock, flags);
	if (likely(!list_empty(&iocg->active_list))) {
		iocg->abs_vdebt += abs_cost;
		iocg_kick_delay(iocg, &now, cost);
	} else {
		iocg_commit_bio(iocg, bio, cost);
	}
	spin_unlock_irqrestore(&iocg->waitq.lock, flags);
}

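/*
 * A bio charged at issue time has completed. Fold its cost into done_vtime
 * so the period timer can tell how much of the charged work has finished.
 */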
static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);

	if (iocg && bio->bi_iocost_cost)
		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
}

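/*
 * A request has completed. Compare its total time on the queue against the
 * read/write latency target and update the per-cpu met/missed counters and
 * wait-time sum that the period timer samples when adjusting the vrate.
 */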
static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
{
	struct ioc *ioc = rqos_to_ioc(rqos);
	u64 on_q_ns, rq_wait_ns;
	int pidx, rw;

	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
		return;

	switch (req_op(rq) & REQ_OP_MASK) {
	case REQ_OP_READ:
		pidx = QOS_RLAT;
		rw = READ;
		break;
	case REQ_OP_WRITE:
		pidx = QOS_WLAT;
		rw = WRITE;
		break;
	default:
		return;
	}

	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;

	if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
	else
		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);

	this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
}

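/*
 * The automatically selected parameters depend on the device's queue depth;
 * re-derive them when it changes.
 */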
static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	spin_lock_irq(&ioc->lock);
	ioc_refresh_params(ioc, false);
	spin_unlock_irq(&ioc->lock);
}

static void ioc_rqos_exit(struct rq_qos *rqos)
{
	struct ioc *ioc = rqos_to_ioc(rqos);

	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);

	spin_lock_irq(&ioc->lock);
	ioc->running = IOC_STOP;
	spin_unlock_irq(&ioc->lock);

	del_timer_sync(&ioc->timer);
	free_percpu(ioc->pcpu_stat);
	kfree(ioc);
}

static struct rq_qos_ops ioc_rqos_ops = {
	.throttle = ioc_rqos_throttle,
	.merge = ioc_rqos_merge,
	.done_bio = ioc_rqos_done_bio,
	.done = ioc_rqos_done,
	.queue_depth_changed = ioc_rqos_queue_depth_changed,
	.exit = ioc_rqos_exit,
};

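/*
 * Set up the per-queue ioc and register it as an rq_qos policy. This is
 * called lazily from the io.cost.qos and io.cost.model write paths the
 * first time either file is configured for a device.
 */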
static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_init(&ioc->period_seqcount);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

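/*
 * Per-blkcg policy data. It only carries the cgroup's default weight, which
 * is used for devices without an explicit per-device weight.
 */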
static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

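/*
 * Per-(cgroup, queue) policy data. The trailing ancestors[] array is sized
 * for the cgroup's level so hierarchical weights can be computed by walking
 * down from the root without chasing parent pointers.
 */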
static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(sizeof(*iocg) + levels * sizeof(iocg->ancestors[0]),
			    gfp, q->node);
	if (!iocg)
		return NULL;

	return &iocg->pd;
}

static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	iocg->hweight_active = HWEIGHT_WHOLE;
	iocg->hweight_inuse = HWEIGHT_WHOLE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;
	hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->delay_timer.function = iocg_delay_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

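/*
 * Tear down a per-(cgroup, queue) iocg: retire its active weight, drop it
 * from the active list and cancel its timers before it is freed.
 */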
static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!list_empty(&iocg->active_list)) {
			propagate_active_weight(iocg, 0, 0);
			list_del_init(&iocg->active_list);
		}
		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
		hrtimer_cancel(&iocg->delay_timer);
	}
	kfree(iocg);
}

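/*
 * Interface files for io.weight. Reads show the cgroup's default weight
 * followed by any per-device overrides. Writes accept either a default
 * weight ("default 100" or just "100") or a per-device override
 * ("MAJ:MIN 200"); writing "default" for a device clears its override.
 * For example (device numbers are illustrative only):
 *
 *   echo "default 100" > io.weight
 *   echo "8:16 200" > io.weight
 */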
static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock(&blkcg->lock);
		iocc->dfl_weight = v;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock_irq(&iocg->ioc->lock);
				weight_updated(iocg);
				spin_unlock_irq(&iocg->ioc->lock);
			}
		}
		spin_unlock(&blkcg->lock);

		return nbytes;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v;
	weight_updated(iocg);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}

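/*
 * Interface files for io.cost.qos on the root cgroup. Each config line is
 * keyed by device and takes enable=%u, ctrl=auto|user, rpct/wpct (latency
 * percentiles), rlat/wlat (latency targets in usecs) and min/max (vrate
 * bounds in pct). For example (device numbers and values are illustrative
 * only):
 *
 *   echo "8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=5000" \
 *	> io.cost.qos
 */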
static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};

static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;
einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

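/*
 * Interface files for io.cost.model on the root cgroup. ctrl selects auto or
 * user mode, model must currently be "linear", and the remaining keys are
 * the linear model coefficients (rbps/wbps in bytes per second, the seq/rand
 * iops keys in IOs per second). For example (device numbers and coefficients
 * are illustrative only):
 *
 *   echo "8:16 ctrl=user model=linear rbps=2000000000 rseqiops=200000" \
 *	> io.cost.model
 */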
static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};

static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct gendisk *disk;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	ioc = q_to_ioc(disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	put_disk_and_module(disk);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	put_disk_and_module(disk);
	return ret;
}

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes = ioc_files,
	.cpd_alloc_fn = ioc_cpd_alloc,
	.cpd_free_fn = ioc_cpd_free,
	.pd_alloc_fn = ioc_pd_alloc,
	.pd_init_fn = ioc_pd_init,
	.pd_free_fn = ioc_pd_free,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);