/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	65536
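/*
 * MMC_QUEUE_BOUNCESZ above caps a bounce buffer at 64 KiB (128 sectors
 * of 512 bytes); mmc_init_queue() clamps it further to the host's
 * max_req_size, max_seg_size and max_blk_count limits.
 */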
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
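/*
 * The queue is serviced by a dedicated kernel thread.  Two request
 * slots (mqrq_cur and mqrq_prev) are swapped on every iteration so
 * that the next request can be prepared while the previous one is
 * still in flight on the host controller.
 */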
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
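/*
 * thread_sem serialises the worker thread with mmc_queue_suspend():
 * the thread drops it before sleeping, and suspend takes it to
 * guarantee no request is being processed while the host suspends.
 */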
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}
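/*
 * Note: the block layer invokes the request_fn with q->queue_lock
 * held, which is why the fetch-and-fail loop above (for a dead queue
 * with no queuedata) needs no extra locking of its own.
 */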
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
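/*
 * Advertise the card's erase/trim capabilities to the block layer.
 * discard_granularity is pref_erase converted from sectors to bytes
 * (<< 9); setting it to 0 drops the alignment hint when pref_erase
 * exceeds the maximum discard size.
 */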
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
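	/*
	 * Hosts that can only handle a single segment (max_segs == 1)
	 * cannot do scatter-gather DMA.  For those, copy each request
	 * through one physically contiguous bounce buffer instead, at
	 * the cost of an extra memcpy per transfer.
	 */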
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
					kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
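	/*
	 * Normal scatter-gather path: let the host's own limits bound
	 * the queue, so requests map straight onto the controller's
	 * segment list with no copying.
	 */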
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
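/*
 * Typical lifecycle, as used by the mmc_block driver: mmc_init_queue()
 * is called at probe time with the partition's lock and subname; the
 * mmcqd thread then services requests until mmc_cleanup_queue() is
 * called on remove.
 */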
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
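/*
 * Packed command support (introduced with eMMC 4.5): several small
 * writes can be batched into one packed command.  Each request slot
 * gets its own struct mmc_packed holding the packed command header
 * and the list of batched requests.
 */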
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
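/*
 * The final down(&mq->thread_sem) only returns once the worker thread
 * has released the semaphore, i.e. once it has gone idle, so the host
 * can safely be powered down afterwards.
 */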
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
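/*
 * For a packed write, the packed command header (512 bytes, or 4 KiB
 * for large-sector cards) is prepended to the data by mapping it into
 * the head of the scatterlist before the per-request segments.
 */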
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}

	sg_mark_end(sg + (sg_len - 1));

	return sg_len;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
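/*
 * On the bounce path the host only ever sees a single segment: the
 * request is first mapped onto bounce_sg for copying, then collapsed
 * into one sg entry covering the contiguous bounce buffer, hence the
 * return value of 1 above.
 */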
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}