/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}
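/*
 * Note: BT_WAIT_QUEUES is a power of two, so the increment wraps with a
 * mask instead of a branch. With 8 wait queues, for example, the index
 * simply cycles 0, 1, ..., 7, 0, 1, ...
 */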
static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
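/*
 * Example (illustrative numbers): with a shared map of bt->depth == 256
 * and users == 3 active queues, each queue may have up to
 * (256 + 3 - 1) / 3 == 86 tags in flight. With users == 128 the quotient
 * drops to 2, but the max() clamps the per-queue allowance to 4 tags.
 */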
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
			 bool nowrap)
{
	int tag, org_last_tag = last_tag;

	while (1) {
		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
		if (unlikely(tag >= bm->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag && !nowrap) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(tag, &bm->word))
			break;

		last_tag = tag + 1;
		if (last_tag >= bm->depth - 1)
			last_tag = 0;
	}

	return tag;
}
#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
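/*
 * Rough illustration of the two allocation policies: with BLK_TAG_ALLOC_RR
 * the per-ctx tag cache always advances past the tag just allocated, so the
 * allocator keeps walking the map round-robin. With BLK_TAG_ALLOC_FIFO the
 * cache is refreshed on free (see blk_mq_put_tag()), so a recently freed
 * tag tends to be handed out again.
 */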
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
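/*
 * Layout example (assuming 64-bit longs and a depth of 256 tags): the map
 * consists of 4 blk_align_bitmap words, each padded out to its own
 * cacheline. TAG_TO_INDEX() selects the word (tag >> bits_per_word) and
 * TAG_TO_BIT() the bit within it, so tag 130 lives in word 2, bit 2.
 */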
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
				    BT_ALLOC_RR(tags));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		/*
		 * Jump to next index, and reset the last tag to be the
		 * first tag of that index
		 */
		index++;
		last_tag = (index << bt->bits_per_word);

		if (index >= bt->map_nr) {
			index = 0;
			last_tag = 0;
		}
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag, struct blk_mq_tags *tags)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag, tags);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
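/*
 * Note on the loop above: after io_schedule() the task may wake up on a
 * different CPU, so the software queue context and hardware queue are
 * re-looked-up before retrying, and the wait queue pointer is refreshed to
 * match the (possibly different) bitmap being waited on.
 */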
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag, data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
		data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
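/*
 * Example of the rolling, batched wakeup (illustrative numbers): with a
 * wake_cnt of, say, 8, every freed tag decrements bs->wait_cnt, but only
 * the free that drops it to zero wakes anybody. At that point wait_cnt is
 * recharged with wake_cnt and wake_index advances, so the next batch of
 * frees targets the next of the BT_WAIT_QUEUES wait queues. This avoids a
 * thundering herd when many submitters sleep on an exhausted tag map.
 */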
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
			*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
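/*
 * Example (illustrative numbers): resizing to depth == 50 with
 * tags_per_word == 16 gives per-word depths of 16, 16, 16 and 2. Assuming
 * BT_WAIT_QUEUES is 8, wake_cnt becomes max(1, 50 / 8) == 6, since the
 * default BT_WAIT_BATCH would exceed depth / BT_WAIT_QUEUES.
 */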
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		bt->map = NULL;
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
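/*
 * Worked example (illustrative): on a 64-bit build bits_per_word starts at
 * 6 (64 tags per word). For depth == 31 the loop halves it while
 * tags_per_word * 4 > 31, ending at tags_per_word == 4, so the 31 tags are
 * spread over ALIGN(31, 4) / 4 == 8 cacheline-aligned words instead of
 * sharing a single word.
 */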
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	tags->alloc_policy = alloc_policy;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
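/*
 * For example, assuming BLK_MQ_UNIQUE_TAG_BITS is 16, a request with tag 13
 * on hardware queue 2 yields the unique tag 0x0002000d; the hardware queue
 * and per-queue tag can be recovered with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() from blk-mq.h.
 */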
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}
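/*
 * Example of the resulting sysfs output for a map of 256 tags with no
 * reserved tags and two shared users active (illustrative values):
 *
 *	nr_tags=256, reserved_tags=0, bits_per_word=6
 *	nr_free=192, nr_reserved=0
 *	active_queues=2
 */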