block/blk-tag.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"
/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    No locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
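/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * blk_queue_find_tag() is a driver completion path that gets only a
 * hardware-reported tag back from the device and needs the matching
 * request.  The name "hw_tag" is an assumption for the example; the lookup
 * itself needs no lock, but the follow-up tag handling does (see the
 * completion sketch after blk_queue_end_tag() below).
 *
 *        spin_lock_irqsave(q->queue_lock, flags);
 *        rq = blk_queue_find_tag(q, hw_tag);
 *        if (!rq)
 *                pr_err("spurious completion for tag %d\n", hw_tag);
 *        spin_unlock_irqrestore(q->queue_lock, flags);
 */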
/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drop the reference count on @bqt and free it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }
}
EXPORT_SYMBOL(blk_free_tags);
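/*
 * Illustrative note (not from the original file): blk_free_tags() is the
 * counterpart of blk_init_tags() for an externally managed map.  A caller
 * that created a shared map drops its own reference here once it is done
 * with the map; queues that attached to it via blk_queue_init_tags() drop
 * their references automatically through __blk_queue_free_tags().
 */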
/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        blk_free_tags(bqt);

        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    This is used to disable tagged queuing on a device, yet leave the
 *    queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __func__, depth);
        }

        tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}
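/*
 * Worked example (added for illustration): the tag bitmap above is sized
 * in whole longs, nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG.
 * On a 64-bit build a depth of 70 gives ALIGN(70, 64) / 64 = 128 / 64 = 2
 * unsigned longs, i.e. 128 bits of map, of which only the first "depth"
 * bits are ever handed out.
 */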
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth, int alloc_policy)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        atomic_set(&tags->refcnt, 1);
        tags->alloc_policy = alloc_policy;
        tags->next_tag = 0;
        return tags;
fail:
        kfree(tags);
        return NULL;
}
/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
        return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
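/*
 * Illustrative sketch (not part of the original file): blk_init_tags() is
 * meant for a tag map shared by several queues, e.g. one map per host with
 * one queue per logical unit.  The depth of 256, the round-robin policy and
 * the error handling are assumptions for the example.
 *
 *        struct blk_queue_tag *shared;
 *
 *        shared = blk_init_tags(256, BLK_TAG_ALLOC_RR);
 *        if (!shared)
 *                return -ENOMEM;
 *        // each queue takes its own reference on the shared map
 *        err = blk_queue_init_tags(q, 256, shared, BLK_TAG_ALLOC_RR);
 *        ...
 *        // the creator drops its reference when the host goes away
 *        blk_free_tags(shared);
 */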
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags, int alloc_policy)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth, alloc_policy);

                if (!tags)
                        return -ENOMEM;

        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
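/*
 * Illustrative sketch (not part of the original file): the common case is a
 * driver enabling tagged queuing on a single queue with a private map,
 * passing NULL for @tags so one is allocated here.  The depth of 64 and the
 * FIFO policy are assumptions for the example.
 *
 *        err = blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_FIFO);
 *        if (err)
 *                return err;
 */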
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;

        /*
         * If we already have a large enough real_max_depth, just adjust
         * max_depth.  *NOTE* as requests with tag values between new_depth
         * and real_max_depth can be in-flight, the tag map cannot be
         * shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }

        /*
         * Currently we cannot replace a shared tag map with a new one,
         * so error out if this is the case.
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
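/*
 * Illustrative sketch (not part of the original file): per the note above,
 * resizing must happen under the queue lock.  The new depth of 128 is an
 * assumption for the example.
 *
 *        spin_lock_irq(q->queue_lock);
 *        err = blk_queue_resize_tags(q, 128);
 *        spin_unlock_irq(q->queue_lock);
 */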
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list, thus corrupting the internal tag list.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned tag = rq->tag; /* negative tags invalid */

        lockdep_assert_held(q->queue_lock);

        BUG_ON(tag >= bqt->real_max_depth);

        list_del_init(&rq->queuelist);
        rq->rq_flags &= ~RQF_QUEUED;
        rq->tag = -1;
        rq->internal_tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __func__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __func__, tag);
                return;
        }

        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
}
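/*
 * Illustrative sketch (not part of the original file): as the description
 * above says, the tag has to be ended before the request is finally
 * completed, otherwise the freed request could be recycled while its tag
 * slot still points at it.  Using __blk_end_request_all() with BLK_STS_OK
 * for a successful completion is an assumption for the example; the queue
 * lock is held throughout.
 *
 *        blk_queue_end_tag(q, rq);                // release the tag first
 *        __blk_end_request_all(rq, BLK_STS_OK);   // then complete the request
 */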
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned max_depth;
        int tag;

        lockdep_assert_held(q->queue_lock);

        if (unlikely((rq->rq_flags & RQF_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
                       __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         *
         * We reserve a few tags just for sync IO, since we don't want
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
        if (!rq_is_sync(rq) && max_depth > 1) {
                switch (max_depth) {
                case 2:
                        max_depth = 1;
                        break;
                case 3:
                        max_depth = 2;
                        break;
                default:
                        max_depth -= 2;
                }
                if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }

        do {
                if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
                        tag = find_first_zero_bit(bqt->tag_map, max_depth);
                        if (tag >= max_depth)
                                return 1;
                } else {
                        int start = bqt->next_tag;
                        int size = min_t(int, bqt->max_depth, max_depth + start);
                        tag = find_next_zero_bit(bqt->tag_map, size, start);
                        if (tag >= size && start + size > bqt->max_depth) {
                                size = start + size - bqt->max_depth;
                                tag = find_first_zero_bit(bqt->tag_map, size);
                        }
                        if (tag >= size)
                                return 1;
                }
        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        bqt->next_tag = (tag + 1) % bqt->max_depth;
        rq->rq_flags |= RQF_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
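/*
 * Illustrative sketch (not part of the original file): a legacy request_fn
 * style dispatch loop might hand out tags like this.  blk_queue_start_tag()
 * already dequeues and starts the request via blk_start_request(), so on
 * success the driver only issues it to hardware; a return of 1 means no tag
 * (or no room for async IO) is available and the request stays on the queue
 * for later.  The helper name "example_issue_to_hw" is an assumption.
 *
 *        while ((rq = blk_peek_request(q)) != NULL) {
 *                if (blk_queue_start_tag(q, rq))
 *                        break;          // out of tags, try again later
 *                example_issue_to_hw(rq);
 *        }
 */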