/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
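/*
 * The mask relies on BT_WAIT_QUEUES being a power of two: ANDing with
 * (BT_WAIT_QUEUES - 1) wraps the index like a modulo, without a
 * division.
 */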
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		bt_index_inc(&wake_index);
	}
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}
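/*
 * __blk_mq_tag_busy() and __blk_mq_tag_idle() pair up through the
 * BLK_MQ_S_TAG_ACTIVE bit, so active_queues is bumped and dropped at
 * most once per busy/idle transition of a hardware queue.
 */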
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 can't be split further.
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags.
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
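/*
 * Example: with a shared map of bt->depth == 128 and users == 3 active
 * queues, each queue may have up to max((128 + 3 - 1) / 3, 4U) == 43
 * tags in flight; the 4U floor keeps heavy sharing from starving any
 * single queue.
 */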
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}
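/*
 * The word-level allocation is lock-free: find_next_zero_bit() picks a
 * candidate and test_and_set_bit_lock() claims it; if another CPU won
 * the race for that bit, the loop just searches on from the next
 * position.
 */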
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
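/*
 * Example: with bt->bits_per_word == 6 (64 tags per word), tag 70 maps
 * to word 70 >> 6 == 1 and bit 70 & 63 == 6; __bt_get() rebuilds the
 * global tag as (index << bits_per_word) + bit.
 */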
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;

	if (!hctx)
		return &bt->bs[0];

	bs = &bt->bs[hctx->wait_index];
	bt_index_inc(&hctx->wait_index);
	return bs;
}
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
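/*
 * Re-fetching data->ctx and data->hctx after io_schedule() matters: the
 * task may wake up on a different CPU, which can map to a different
 * software and hardware queue, so the bitmap, wait queue and last_tag
 * pointers are all re-derived before retrying.
 */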
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			if (wake_index != bt->wake_index)
				bt->wake_index = wake_index;

			return bs;
		}

		bt_index_inc(&wake_index);
	}

	return NULL;
}
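/*
 * Caching the first non-empty wait queue back into bt->wake_index lets
 * later frees start their scan there instead of walking the same run of
 * empty queues again.
 */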
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order access to req in the
	 * free path and the clearing of the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
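/*
 * This is the "rolling wakeup" from the file header: rather than waking
 * a waiter on every free, wait_cnt batches wake_cnt frees per wakeup and
 * wake_index rotates across the BT_WAIT_QUEUES wait queues, spreading
 * both wakeups and waitqueue cacheline traffic.
 */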
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}
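/*
 * Tag namespace recap: reserved tags occupy [0, nr_reserved_tags) and
 * normal tags are handed out offset by nr_reserved_tags (see
 * __blk_mq_get_tag()), so the offset is stripped again here before
 * touching the per-bitmap state.
 */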
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
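/*
 * Note the polarity: the map handed to fn() has a bit set for each
 * *free* tag (bt_for_each_free() collects zero bits), so callers must
 * treat clear bits as the busy tags.
 */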
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}
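/*
 * The depth is distributed greedily: each word receives
 * min(remaining, tags_per_word), so e.g. 40 tags over 16-tag words end
 * up as 16, 16, 8, with any further words at depth 0. The wakeup batch
 * works out to min(BT_WAIT_BATCH, max(1, depth / 4)).
 */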
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt_update_count(bt, depth);
	return 0;
}
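/*
 * Example: on a 64-bit machine, depth == 32 starts at bits_per_word ==
 * 6 (64 tags per word) and shrinks until tags_per_word * 4 <= depth,
 * settling at bits_per_word == 3: 8 tags per word across
 * ALIGN(32, 8) / 8 == 4 cacheline-aligned words.
 */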
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
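/*
 * Seeding each software queue's last_tag cache at a random offset
 * spreads fresh allocators across the bitmap words from the start,
 * matching the per-cacheline layout described above __bt_get().
 */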
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}