block/blk-tag.c
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}

EXPORT_SYMBOL(blk_queue_find_tag);
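
/*
 * Example (illustrative sketch, not used by the block layer itself): a
 * driver's completion path would typically map the tag reported by the
 * hardware back to the request it belongs to.  "hw_tag" below is a
 * hypothetical value read from the device; error handling is reduced to
 * a printk.
 *
 *	struct request *rq;
 *
 *	rq = blk_queue_find_tag(q, hw_tag);
 *	if (rq == NULL) {
 *		printk(KERN_ERR "spurious completion for tag %d\n", hw_tag);
 *		return;
 *	}
 *	... complete rq ...
 */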

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt@.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For externally managed @bqt@ frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    This is used to disable tagged queuing to a device, yet leave the
 *    queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __FUNCTION__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
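
/*
 * Example (illustrative sketch): a driver whose hardware shares one set of
 * tags across several logical units can allocate the map once with
 * blk_init_tags() and hand it to each queue.  MY_QUEUE_DEPTH and the queue
 * pointers q0/q1 are hypothetical, and error handling is omitted.
 *
 *	struct blk_queue_tag *shared_tags;
 *
 *	shared_tags = blk_init_tags(MY_QUEUE_DEPTH);
 *	blk_queue_init_tags(q0, MY_QUEUE_DEPTH, shared_tags);
 *	blk_queue_init_tags(q1, MY_QUEUE_DEPTH, shared_tags);
 *
 *	... later, once every queue using the map has been released ...
 *	blk_free_tags(shared_tags);
 */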

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		if ((rc = blk_queue_resize_tags(q, depth)))
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
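
/*
 * Example (illustrative sketch): the common case of a driver enabling
 * tagging on a single queue with a private map.  Passing a NULL @tags
 * makes the block layer allocate the map itself.  MY_QUEUE_DEPTH and the
 * "mydev" prefix are hypothetical.
 *
 *	if (blk_queue_init_tags(q, MY_QUEUE_DEPTH, NULL))
 *		printk(KERN_WARNING "mydev: tagged queuing disabled\n");
 */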

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have a large enough real_max_depth, just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
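
/*
 * Example (illustrative sketch): adjusting the depth at runtime, e.g. after
 * the device reports a different supported queue depth.  The caller must
 * hold the queue lock, per the note above; "new_depth" is a hypothetical
 * value obtained from the hardware.
 *
 *	if (blk_queue_resize_tags(q, new_depth))
 *		printk(KERN_WARNING "could not resize tag map to %d\n",
 *		       new_depth);
 */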

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __FUNCTION__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __FUNCTION__, tag);
		return;
	}

	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
	bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);
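
/*
 * Example (illustrative sketch): a completion path matching the description
 * above, using the pre-blk_end_request() completion helpers this file was
 * written against.  The "1" arguments mean success; a real driver would
 * propagate the actual error status.
 *
 *	if (!end_that_request_first(rq, 1, rq->hard_nr_sectors)) {
 *		if (blk_rq_tagged(rq))
 *			blk_queue_end_tag(q, rq);
 *		end_that_request_last(rq, 1);
 *	}
 */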

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __FUNCTION__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
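
/*
 * Example (illustrative sketch): using blk_queue_start_tag() as a
 * stand-alone helper inside a driver's request_fn, with the queue lock
 * held.  The loop stops when the tag map is exhausted (non-zero return)
 * and can resume when a later completion frees a tag; handing the request
 * to the hardware is left as a hypothetical step.
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;
 *		... hand rq (now carrying rq->tag) to the hardware ...
 *	}
 */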

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
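
/*
 * Example (illustrative sketch): a driver's error handler might drop every
 * outstanding tag after a controller or bus reset so the requests get
 * re-issued from the request queue.  The queue lock must be held, per the
 * note above; "flags" is the usual local for the saved IRQ state.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */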