#include "blk-rq-qos.h"

/*
 * Increment 'v' if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
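
/*
 * Claim one slot of the rqw inflight budget: bump rqw->inflight if it is
 * still below @limit.
 */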
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
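
/*
 * The __rq_qos_*() helpers below walk the whole rq_qos chain attached to a
 * request queue and invoke the matching callback of every policy that
 * implements it. Callers normally reach them through the inline wrappers in
 * blk-rq-qos.h, which only call in here when a chain is attached.
 */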
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}
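
/*
 * Queue depth scaling helpers, used by rq_qos policies (e.g. blk-wbt) that
 * throttle by limiting how many requests they allow in flight at once.
 */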

/*
 * Return true if we can't increase the depth any further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
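		/*
		 * Illustrative numbers only: starting from a depth of 64,
		 * scale_step == 2 yields 1 + ((64 - 1) >> 2) == 16, while
		 * scale_step == -1 yields 1 + ((64 - 1) << 1) == 127, which
		 * is then capped to 3/4 of the queue depth below.
		 */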
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
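
/*
 * Allow more requests in flight: step the scale factor toward deeper queues
 * (a more negative scale_step means a larger depth) and recompute the limit,
 * remembering whether we hit the ceiling so the next round can stop early.
 */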
void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}
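
/*
 * Per-waiter state for rq_qos_wait(). It lives on the waiter's stack and is
 * queued on rqw->wait, so the wake function can hand the inflight budget
 * directly to a specific sleeping task.
 */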
struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};
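
/*
 * Wake function for rq_qos_wait() waiters: try to grab the inflight budget
 * on behalf of the waiter and only dequeue and wake it if that succeeded, so
 * wakeups are never wasted on tasks that would just go back to sleep.
 */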
static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * inc the rqw->inflight counter if we have room, or return false if not; in
 * that case we will sleep until room becomes available.
 *
 * cleanup_cb is used in case we race with a waker and need to clean up the
 * inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func	= rq_qos_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with rq_qos_wake_function() getting a
			 * token, which means we now have two. Put our local
			 * token and wake anyone else potentially waiting for
			 * one.
			 */
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = false;
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
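
/*
 * Tear down every policy attached to @q: drop the rq_qos debugfs entries,
 * then unlink each rq_qos from the chain and run its exit handler.
 */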
void rq_qos_exit(struct request_queue *q)
{
	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}