// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	do {
		if (cur >= below)
			return false;
	} while (!atomic_try_cmpxchg(v, &cur, cur + 1));

	return true;
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->merge)
			rqos->ops->merge(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
{
	do {
		if (rqos->ops->queue_depth_changed)
			rqos->ops->queue_depth_changed(rqos);
		rqos = rqos->next;
	} while (rqos);
}
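
/*
 * Illustrative sketch (not part of this file): each policy hooks into the
 * chain walked by the dispatchers above by filling in a struct rq_qos_ops;
 * callbacks left NULL are simply skipped. The ops table and names below are
 * hypothetical, loosely modelled on how wbt/blk-iolatency wire themselves up:
 *
 *	static const struct rq_qos_ops example_rqos_ops = {
 *		.throttle	= example_throttle,	// may sleep in rq_qos_wait()
 *		.track		= example_track,
 *		.done		= example_done,		// completion accounting
 *		.exit		= example_exit,
 *	};
 *
 *	// registered via rq_qos_add(&ex->rqos, disk, RQ_QOS_WBT, &example_rqos_ops)
 *	// while holding q->rq_qos_mutex; see rq_qos_add() below.
 */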

/*
 * Return true, if we can't increase the depth further by scaling
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
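
/*
 * Worked example (illustrative, not in the original source): with
 * default_depth = 64 and queue_depth = 128, the starting depth is
 * min(64, 128) = 64. At scale_step = 2 the allowed depth shrinks to
 * 1 + ((64 - 1) >> 2) = 16; at scale_step = -1 it would grow to
 * 1 + ((64 - 1) << 1) = 127, but is clamped to 3 * 128 / 4 = 96, in
 * which case the function returns true to signal that scaling up any
 * further is not possible.
 */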

/* Returns true on success and false if scaling up wasn't possible */
bool rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return false;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
	return true;
}

/*
 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation. Returns true on success and returns false if
 * scaling down wasn't possible.
 */
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return false;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
	return true;
}
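
/*
 * Usage sketch (hypothetical, mirroring how wbt drives these helpers): a
 * policy's latency timer compares observed completion latencies against its
 * target and nudges the allowed depth accordingly:
 *
 *	if (latency_exceeded(stats))
 *		rq_depth_scale_down(&rwb->rq_depth, true);
 *	else if (only_writes_pending(stats))
 *		rq_depth_scale_up(&rwb->rq_depth);
 *
 *	// latency_exceeded(), only_writes_pending() and 'rwb' are illustrative
 *	// names, not part of this file.
 */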

struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};

static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
	smp_wmb();
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * inc the rqw->inflight if we have the ability to do so, or return false if
 * not, in which case we will sleep until the room becomes available.
 *
 * cleanup_cb is in case that we race with a waker and need to cleanup the
 * inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func	= rq_qos_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
						 TASK_UNINTERRUPTIBLE);
	do {
		/* The memory barrier in set_current_state saves us here. */
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with rq_qos_wake_function() getting a token,
			 * which means we now have two. Put our local token
			 * and wake anyone else potentially waiting for one.
			 */
			smp_rmb();
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = true;
		set_current_state(TASK_UNINTERRUPTIBLE);
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
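
/*
 * Caller-side sketch (hypothetical, in the spirit of wbt/blk-iolatency): a
 * policy's ->throttle callback pairs rq_qos_wait() with an inflight grab and
 * an undo callback. The names below are illustrative only:
 *
 *	static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		// succeed only while under the currently allowed depth
 *		return rq_wait_inc_below(rqw, example_max_depth(private_data));
 *	}
 *
 *	static void example_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		// give back the extra token we grabbed while racing a waker
 *		atomic_dec(&rqw->inflight);
 *	}
 *
 *	// in the ->throttle hook:
 *	//	rq_qos_wait(rqw, private_data, example_inflight_cb, example_cleanup_cb);
 */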

void rq_qos_exit(struct request_queue *q)
{
	mutex_lock(&q->rq_qos_mutex);
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
	mutex_unlock(&q->rq_qos_mutex);
}

int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
		const struct rq_qos_ops *ops)
{
	struct request_queue *q = disk->queue;

	lockdep_assert_held(&q->rq_qos_mutex);

	rqos->disk = disk;
	rqos->id = id;
	rqos->ops = ops;

	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 */
	blk_mq_freeze_queue(q);

	if (rq_qos_id(q, rqos->id))
		goto ebusy;
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;
ebusy:
	blk_mq_unfreeze_queue(q);
	return -EBUSY;
}
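
/*
 * Registration sketch (hypothetical caller, not part of this file): a policy
 * attaches itself while holding q->rq_qos_mutex, e.g.:
 *
 *	mutex_lock(&q->rq_qos_mutex);
 *	ret = rq_qos_add(&ex->rqos, disk, RQ_QOS_WBT, &example_rqos_ops);
 *	mutex_unlock(&q->rq_qos_mutex);
 *	if (ret)
 *		kfree(ex);	// -EBUSY: an rqos with this id already exists
 *
 *	// Teardown goes through rq_qos_del() under the same mutex, while
 *	// rq_qos_exit() takes the mutex itself when the whole queue goes away.
 *	// 'ex' and example_rqos_ops are illustrative names.
 */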

void rq_qos_del(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	struct rq_qos **cur;

	lockdep_assert_held(&q->rq_qos_mutex);

	blk_mq_freeze_queue(q);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}