/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}
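
/*
 * Wait queue index helpers. The mask in bt_index_inc() relies on
 * BT_WAIT_QUEUES (defined in blk-mq-tag.h) being a power of two, so the
 * index simply wraps around the array of wait queues. bt_index_atomic_inc()
 * is a best-effort advance: if the cmpxchg loses a race, another caller has
 * already moved the index, which is fine for this use.
 */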
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
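
/*
 * Note on __blk_mq_tag_busy() above: the plain test_bit() check before
 * test_and_set_bit() avoids dirtying the shared hctx state cacheline on
 * every request once the queue is already marked TAG_ACTIVE.
 */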
/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
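
/*
 * Worked example for hctx_may_queue() above: with a shared map of
 * bt->depth == 128 and three active queues, each hctx may have at most
 * max((128 + 3 - 1) / 3, 4U) == 43 requests in flight before it has to
 * back off and let the other users catch up.
 */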
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
			 bool nowrap)
{
	int tag, org_last_tag = last_tag;

	while (1) {
		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
		if (unlikely(tag >= bm->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag && !nowrap) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(tag, &bm->word))
			break;

		last_tag = tag + 1;
		if (last_tag >= bm->depth - 1)
			last_tag = 0;
	}

	return tag;
}
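
/*
 * Allocation policy: BLK_TAG_ALLOC_RR callers always advance the cached
 * tag and never wrap within a word (the nowrap argument above), so tags
 * are handed out round-robin across the map. BLK_TAG_ALLOC_FIFO instead
 * prefers to reuse recently freed tags via the per-ctx last_tag cache.
 */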
#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
				    BT_ALLOC_RR(tags));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		/*
		 * Jump to next index, and reset the last tag to be the
		 * first tag of that index
		 */
		index++;
		last_tag = (index << bt->bits_per_word);

		if (index >= bt->map_nr) {
			index = 0;
			last_tag = 0;
		}
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
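
/*
 * Sleepers are spread over BT_WAIT_QUEUES wait queues, selected round-robin
 * per hardware queue via hctx->wait_index, so a single waitqueue lock does
 * not become a bottleneck when many submitters block. Reserved tag waiters
 * (hctx == NULL) all share the first wait queue.
 */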
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag, struct blk_mq_tags *tags)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag, tags);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}
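
/*
 * Note on bt_get() above: after io_schedule() the task may resume on a
 * different CPU, so the software queue context and hardware queue mapping
 * are re-established before retrying, and the wait queue pointer is
 * refreshed to match the (possibly different) bitmap.
 */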
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag, data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
		data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
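
/*
 * Tag numbering for the allocation paths above: reserved tags occupy the
 * values [0, nr_reserved_tags) as seen by callers, while regular tags are
 * returned offset by nr_reserved_tags. blk_mq_put_tag() below undoes that
 * offset before clearing the bit in the appropriate bitmap.
 */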
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
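
/*
 * Wakeups in bt_clear_tag() above are batched: a wait queue is only woken
 * once wake_cnt frees have accumulated (set up in bt_update_count()), and
 * wake_index then rolls on to the next wait queue. This is the "rolling
 * wakeup" scheme mentioned in the header comment.
 */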
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
			*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
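
/*
 * Example for the wake_cnt computation above, assuming BT_WAIT_QUEUES == 8
 * and BT_WAIT_BATCH == 8 (see blk-mq-tag.h): with depth == 32 the batch is
 * capped at max(1U, 32 / 8) == 4, so shallow maps still wake sleepers
 * reasonably often instead of waiting for a full batch of 8 frees.
 */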
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		bt->map = NULL;
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}
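
/*
 * Sizing example for bt_alloc() above, assuming BITS_PER_LONG == 64: with
 * depth == 32, bits_per_word starts at 6 (64 tags per word) and is shrunk
 * until tags_per_word * 4 <= depth, ending at 3 (8 tags per word), so the
 * map uses ALIGN(32, 8) / 8 == 4 cacheline-aligned words.
 */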
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	tags->alloc_policy = alloc_policy;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function returns a tag with the hardware
 * context index in the upper bits and the per hardware queue tag in the
 * lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
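
/*
 * Example for blk_mq_unique_tag() above, assuming BLK_MQ_UNIQUE_TAG_BITS is
 * 16 (see include/linux/blk-mq.h): a request with tag 5 on hardware queue 2
 * yields the unique value 0x00020005.
 */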
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}