block/blk-mq-tag.c
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
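	/*
	 * Test the bit first so the common, already-active case avoids the
	 * locked test_and_set_bit() and the atomic increment.
	 */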
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up everything that might be sleeping on tag allocation.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
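/*
 * For example, a shared bitmap of depth 128 with three active queues caps
 * each queue at max((128 + 3 - 1) / 3, 4) = 43 in-flight tags.
 */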
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
	if (!hctx_may_queue(hctx, bt))
		return -1;
	return __sbitmap_queue_get(bt);
}

static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();
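
		/*
		 * The task may have run on a different CPU while sleeping,
		 * so remap the software and hardware context before retrying.
		 */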
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}

		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}
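
/*
 * Tag values [0, nr_reserved_tags) belong to the reserved pool, so a tag
 * allocated from bitmap_tags is offset by nr_reserved_tags before being
 * returned to the caller.
 */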
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
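
	/*
	 * Bit 0 of bitmap_tags corresponds to tag nr_reserved_tags, so offset
	 * bitnr before indexing tags->rqs[].
	 */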
	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	if (rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	iter_data->fn(rq, iter_data->data, reserved);
	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
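
/*
 * Walk every allocated request in the tag set and let the driver
 * reinitialize its per-request data; entries without an allocated
 * request are skipped.
 */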
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
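	/*
	 * A shift of -1 lets sbitmap pick the bits-per-word granularity
	 * based on the depth.
	 */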
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}
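
/*
 * Update the queue depth at runtime. tdepth is the full depth including
 * reserved tags; only the normal tag bitmap is resized, since the reserved
 * pool never changes size.
 */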
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}
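
	/*
	 * Callers can recover the two halves with blk_mq_unique_tag_to_hwq()
	 * and blk_mq_unique_tag_to_tag().
	 */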
	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
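
/*
 * Sysfs helper; the exact numbers depend on the device, e.g.:
 *
 *	nr_tags=31, reserved_tags=1, bits_per_word=8
 *	nr_free=28, nr_reserved=1
 *	active_queues=1
 */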
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}