// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if we fail to get a tag the first time, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

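/*
 * Worked example of the fair-share check above (numbers illustrative only):
 * with a shared bitmap of depth 256 and three active queues, each hctx may
 * have up to max((256 + 3 - 1) / 3, 4U) = max(86, 4) = 86 requests in
 * flight. With 64 active queues the per-queue share drops to
 * ceil(256 / 64) = 4, which coincides with the 4U floor that guarantees
 * every active queue at least a few tags.
 */
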
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

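/*
 * Note on shallow_depth: an I/O scheduler can cap one class of allocations
 * by setting data->shallow_depth before the allocation call, e.g. (sketch,
 * the async_depth value is hypothetical):
 *
 *      data->shallow_depth = async_depth;      // e.g. a fraction of depth
 *      tag = __blk_mq_get_tag(data, bt);
 *
 * The Kyber I/O scheduler bounds async requests this way so that sync I/O
 * cannot be starved of tags.
 */
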
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If destination hw queue is changed, fake wake up on
                 * previous queue for compensating the wake up miss, so
                 * other allocations on previous queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        return tag + tag_offset;
}

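/*
 * Condensed sketch (not compiled here) of how the blk-mq core drives the
 * allocator above from blk_mq_get_request(); most field setup and error
 * handling is omitted:
 *
 *      struct blk_mq_alloc_data data = {
 *              .q         = q,
 *              .flags     = flags,
 *              .cmd_flags = op,
 *      };
 *      unsigned int tag;
 *
 *      tag = blk_mq_get_tag(&data);
 *      if (tag == BLK_MQ_TAG_FAIL)
 *              return NULL;            // out of tags, caller backs off
 */
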
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

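/*
 * Example of the tag numbering that blk_mq_get_tag()/blk_mq_put_tag()
 * maintain (numbers illustrative only): with nr_reserved_tags = 2 and
 * nr_tags = 34, reserved tags occupy the driver-visible range [0, 2) and
 * map directly onto breserved_tags, while normal tags occupy [2, 34) and
 * map onto bits [0, 32) of bitmap_tags once the nr_reserved_tags offset
 * is subtracted again.
 */
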
struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *              where rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as third argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
                return iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data,
 *              @reserved) where rq is a pointer to a request. Return true
 *              to continue iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

/**
 * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                                     busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                             busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

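/*
 * Illustrative sketch of a typical blk_mq_tagset_busy_iter() user: a driver
 * counting its in-flight requests. The names my_count_inflight and my_dev
 * are hypothetical, not part of this file:
 *
 *      static bool my_count_inflight(struct request *rq, void *data,
 *                                    bool reserved)
 *      {
 *              unsigned int *count = data;
 *
 *              (*count)++;
 *              return true;            // true == keep iterating
 *      }
 *
 *      unsigned int inflight = 0;
 *
 *      blk_mq_tagset_busy_iter(&my_dev->tag_set, my_count_inflight,
 *                              &inflight);
 */
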
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
                                              void *data, bool reserved)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests
 * have had their completion function run
 * @tagset:     Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shutdown
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

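/*
 * Illustrative teardown ordering for the helper above (sketch only; the
 * my_* names are hypothetical): first stop the hardware so no new
 * completions arrive, then cancel outstanding requests, then drain:
 *
 *      my_disable_hardware(dev);
 *      blk_mq_tagset_busy_iter(&dev->tag_set, my_cancel_rq, dev);
 *      blk_mq_tagset_wait_completed_request(&dev->tag_set);
 */
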
/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *              reserved) where rq is a pointer to a request and hctx points
 *              to the hardware queue associated with the request. 'reserved'
 *              indicates whether or not @rq is a reserved request.
 * @priv:       Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it. __blk_mq_update_nr_hw_queues() uses
         * synchronize_rcu() to ensure this function left the critical section
         * below.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}

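/*
 * Illustrative busy_iter_fn for the iterator above (sketch; my_check_rq and
 * the payload are hypothetical):
 *
 *      static bool my_check_rq(struct blk_mq_hw_ctx *hctx,
 *                              struct request *rq, void *priv, bool reserved)
 *      {
 *              unsigned int *inflight = priv;
 *
 *              if (blk_mq_request_started(rq))
 *                      (*inflight)++;
 *              return true;
 *      }
 *
 *      blk_mq_queue_tag_busy_iter(q, my_check_rq, &inflight);
 */
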
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                int ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                          tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

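/*
 * Worked example for the resize logic above (numbers illustrative only):
 * for tags created with nr_tags = 64 and nr_reserved_tags = 2, a request
 * for tdepth = 32 only shrinks bitmap_tags to 30 usable bits via
 * sbitmap_queue_resize(), while tdepth = 128 with can_grow true allocates
 * a fresh rq map plus request pool and swaps it in through *tagsptr.
 */
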
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
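
/*
 * Example of consuming the unique tag (sketch): the hwq index and per-queue
 * tag can be recovered with the helpers declared in <linux/blk-mq.h>:
 *
 *      u32 unique = blk_mq_unique_tag(rq);
 *      u16 hwq    = blk_mq_unique_tag_to_hwq(unique);
 *      u16 tag    = blk_mq_unique_tag_to_tag(unique);
 *
 * SCSI LLDs use this to obtain a queue-wide unique tag for command lookup.
 */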