block/blk-mq-tag.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if we fail to get a tag the first time, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

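/*
 * Illustrative sketch (not part of the kernel file): the fair share
 * computed above is ceil(depth / users), floored at 4 tags so every
 * active queue can always make some progress. A standalone userspace
 * demo of the arithmetic, assuming a 256-deep shared tag map:
 */
#include <stdio.h>

static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
{
	unsigned int share = (depth + users - 1) / users;	/* ceiling division */

	return share > 4U ? share : 4U;				/* max(share, 4U) */
}

int main(void)
{
	/* 3 active queues sharing 256 tags may each use up to 86 of them... */
	printf("%u\n", fair_share_depth(256, 3));	/* prints 86 */
	/* ...while 128 active queues are still allowed at least 4 each. */
	printf("%u\n", fair_share_depth(256, 128));	/* prints 4 */
	return 0;
}
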
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

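/*
 * Illustrative sketch (not part of this file): a driver whose tag set was
 * created with reserved tags typically reaches this allocator through
 * blk_mq_alloc_request(). BLK_MQ_REQ_RESERVED draws from breserved_tags,
 * and BLK_MQ_REQ_NOWAIT turns tag exhaustion into an immediate error
 * instead of the sleeping loop above. "example_alloc_reserved_rq" is a
 * hypothetical helper.
 */
static struct request *example_alloc_reserved_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return NULL;	/* no reserved tag free right now */
	return rq;
}
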
bool __blk_mq_get_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	bool shared = blk_mq_tag_busy(rq->mq_hctx);
	int tag;

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	}

	if (!hctx_may_queue(rq->mq_hctx, bt))
		return false;
	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	if (shared) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		atomic_inc(&rq->mq_hctx->nr_active);
	}
	rq->mq_hctx->tags->rqs[rq->tag] = rq;
	return true;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

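/*
 * Illustrative sketch (not part of this file): the external tag space is
 * split so that [0, nr_reserved_tags) indexes breserved_tags while
 * [nr_reserved_tags, nr_tags) maps into bitmap_tags; the subtraction
 * above undoes the tag_offset that blk_mq_get_tag() added. A standalone
 * demo assuming 2 reserved tags out of 8:
 */
#include <assert.h>

#define DEMO_NR_RESERVED	2
#define DEMO_NR_TAGS		8

static void demo_tag_spaces(void)
{
	unsigned int tag = 5;				/* external tag value */

	assert(tag >= DEMO_NR_RESERVED);		/* not a reserved tag */
	assert(tag - DEMO_NR_RESERVED == 3);		/* bit 3 of bitmap_tags */
	assert(tag - DEMO_NR_RESERVED < DEMO_NR_TAGS - DEMO_NR_RESERVED);
}
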
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
		rq = tags->static_rqs[bitnr];
	else
		rq = tags->rqs[bitnr];
	if (!rq)
		return true;
	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
	    !blk_mq_request_started(rq))
		return true;
	return iter_data->fn(rq, iter_data->data, reserved);
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests'
 * completion functions have run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shutdown.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it. __blk_mq_update_nr_hw_queues() uses
	 * synchronize_rcu() to ensure this function left the critical section
	 * below.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

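/*
 * Illustrative sketch (not part of this file): a busy_iter_fn that counts
 * in-flight requests, in the spirit of blk-mq's own inflight accounting.
 * "example_count_inflight" and its counter are hypothetical. Usage:
 *
 *	unsigned int n = 0;
 *	blk_mq_queue_tag_busy_iter(q, example_count_inflight, &n);
 */
static bool example_count_inflight(struct blk_mq_hw_ctx *hctx,
				   struct request *rq, void *priv,
				   bool reserved)
{
	unsigned int *inflight = priv;

	(*inflight)++;
	return true;	/* keep iterating */
}
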
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

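/*
 * Illustrative sketch (not part of this file): how a hypothetical driver
 * might carve up its tag space, e.g. 256 total tags with 1 reserved for
 * internal or error-handling commands. The reserved depth comes out of
 * the total, so bitmap_tags ends up covering 255 tags and breserved_tags
 * just 1. "example_init_tags" is a hypothetical helper.
 */
static struct blk_mq_tags *example_init_tags(int node)
{
	return blk_mq_init_tags(256, 1, node, BLK_TAG_ALLOC_FIFO);
}
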
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
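
/*
 * Illustrative sketch (not part of this file): BLK_MQ_UNIQUE_TAG_BITS is
 * 16, so the unique tag packs the hardware queue index into bits 31..16
 * and the per-queue tag into bits 15..0. The matching decode helpers,
 * blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag(), live in
 * <linux/blk-mq.h>. A standalone userspace demo of the round trip:
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_UNIQUE_TAG_BITS	16
#define DEMO_UNIQUE_TAG_MASK	((1U << DEMO_UNIQUE_TAG_BITS) - 1)

static uint32_t demo_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << DEMO_UNIQUE_TAG_BITS) |
		(tag & DEMO_UNIQUE_TAG_MASK);
}

static void demo_round_trip(void)
{
	uint32_t unique = demo_unique_tag(2, 17);	/* 0x00020011 */

	assert(unique >> DEMO_UNIQUE_TAG_BITS == 2);	/* hw queue index */
	assert((unique & DEMO_UNIQUE_TAG_MASK) == 17);	/* per-queue tag */
}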