// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters that may be sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

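/*
 * A worked example of the fair-share limit above (illustrative numbers,
 * not taken from this file): with a shared map of 128 tags and 3 active
 * queues, each queue may use up to max(DIV_ROUND_UP(128, 3), 4) = 43 tags.
 * With 64 active queues, the per-queue floor of 4 tags applies instead of
 * DIV_ROUND_UP(128, 64) = 2.
 */
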
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, fake a wake up on
		 * the previous queue to compensate for the missed wakeup, so
		 * other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	return tag + tag_offset;
}

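/*
 * Callers do not normally invoke blk_mq_get_tag() directly; it is reached
 * through the request allocation path, e.g. blk_mq_alloc_request(). As an
 * illustrative sketch, a driver that set nr_reserved_tags could allocate a
 * passthrough request out of the reserved pool with
 * blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED).
 */
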
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_request_started(rq))
		return iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

/**
 * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

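/*
 * An illustrative (not in-tree) use of blk_mq_tagset_busy_iter(): counting
 * started requests across a tag set. The callback and variable names below
 * are hypothetical.
 *
 *	static bool count_started(struct request *rq, void *data, bool reserved)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;	// keep iterating
 *	}
 *
 *	unsigned int count = 0;
 *	blk_mq_tagset_busy_iter(&driver->tag_set, count_started, &count);
 */
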
/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it. __blk_mq_update_nr_hw_queues() uses
	 * synchronize_rcu() to ensure this function has left the critical
	 * section below.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check.
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

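/*
 * For illustration (values are hypothetical): a tag map of 256 tags, one of
 * them reserved for internal commands and allocated with FIFO ordering,
 * could be set up with:
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *
 * Tag 0 would then come from the reserved pool and tags 1..255 from the
 * normal pool (see the tag_offset handling in blk_mq_get_tag()).
 */
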
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		int ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
					  tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				     tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
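
/*
 * For example (illustrative values): with BLK_MQ_UNIQUE_TAG_BITS == 16, a
 * request with tag 5 on hardware queue 2 yields the unique tag
 * (2 << 16) | 5 == 0x00020005. The hwq index and per-queue tag can be
 * recovered with blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag()
 * from <linux/blk-mq.h>.
 */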