/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}
static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
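
/*
 * Note: the wrap in bt_index_inc() relies on BT_WAIT_QUEUES being a power
 * of two, so the increment can be masked instead of using a modulo. The
 * atomic variant is intentionally lossy: if the cmpxchg races with another
 * updater, the lost increment is harmless because the index is only a
 * round-robin hint.
 */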
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}
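
/*
 * Dropping active_queues raises the per-queue fair share computed in
 * hctx_may_queue() for the remaining queues, which is why the idle path
 * ends with a wakeup: throttled tasks must re-evaluate the new limit.
 */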
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
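
/*
 * Worked example: with a shared map of depth 128 and 3 active queues,
 * each queue may use up to (128 + 3 - 1) / 3 = 43 tags; the 4U floor
 * only matters once depth / users would drop below four.
 */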
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = org_last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}
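
/*
 * The test_and_set_bit_lock() loop above covers the race where another
 * CPU claims the bit between find_next_zero_bit() and our attempt to
 * lock it: we simply resume the search one past the contended bit.
 */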
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}
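
/*
 * Sleepers are spread round-robin over the BT_WAIT_QUEUES wait queues
 * via hctx->wait_index; reserved-tag waiters (NULL hctx) always park on
 * queue zero.
 */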
static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
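
/*
 * Note that by the time io_schedule() returns, the task may have been
 * migrated to another CPU, which is why the loop above re-resolves the
 * ctx/hctx mapping (and the per-ctx last_tag cache for normal tags)
 * before every retry.
 */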
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
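
/*
 * Tag namespace: reserved tags occupy [0, nr_reserved_tags) and normal
 * tags sit above them, which is why __blk_mq_get_tag() adds the
 * nr_reserved_tags offset and blk_mq_put_tag() subtracts it again.
 */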
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	/*
	 * The unlock memory barrier needs to order access to req in the
	 * free path against clearing the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
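
/*
 * Wakeups are batched: a sleeper is woken only once every bt->wake_cnt
 * tag frees, and wake_index then rolls on to the next wait queue. This
 * is the "rolling wakeup" scheme from the comment at the top of this
 * file, avoiding a thundering herd when tags free up.
 */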
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
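
/*
 * Example (assuming BITS_PER_LONG == 64, so bits_per_word == 6): a depth
 * of 100 is split as 64 tags in map[0] and 36 in map[1]. The wake_cnt
 * clamp keeps the wakeup batch from exceeding an even split of the depth
 * across the wait queues, with a floor of one.
 */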
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
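
/*
 * Example of the shrink loop above, assuming BITS_PER_LONG == 64: for a
 * depth of 48, tags_per_word drops 64 -> 32 -> 16 -> 8 (since 8 * 4 <= 48),
 * giving ALIGN(48, 8) / 8 = 6 cacheline-aligned words instead of one.
 */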
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
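
/*
 * Starting each software queue's tag cache at a random offset spreads
 * initial allocations across the map, so different ctxs tend to begin
 * their searches in different cachelines.
 */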
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need (and can't) update the reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}