/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

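/*
 * The per-card worker thread.  It waits on thread_sem, fetches requests
 * from the block layer dispatch queue and hands them to mmc_blk_issue_rq().
 * The current and previous request slots are swapped after each issue so
 * that a new request can be prepared while the previous one completes.
 */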
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

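/*
 * Tell the block layer about the card's erase capabilities: maximum
 * discard size, discard granularity, whether discarded data reads back
 * as zeroes, and secure erase support.
 */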
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
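/*
 * Bounce buffer support: when the host controller can only handle a single
 * segment per request, data is staged through one contiguous bounce buffer
 * per request slot (see mmc_queue_bounce_pre()/mmc_queue_bounce_post()).
 */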
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

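/*
 * Each request slot needs a one-entry sg pointing at the bounce buffer
 * itself, plus a bounce_sg list with one entry per 512-byte block of the
 * bounce buffer to map the original request.
 */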
static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

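/*
 * Without bounce buffers, each request slot only needs an sg list big
 * enough for the host's maximum number of segments.
 */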
static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

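/*
 * Free the scatterlists and bounce buffer of a single request slot.
 */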
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue to initialise
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

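	/*
	 * Hosts limited to a single segment per request get a bounce
	 * buffer, capped by the host's request, segment and block-count
	 * limits, so that multi-segment requests can still be serviced.
	 */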
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

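/*
 * Tear down a queue set up by mmc_init_queue(): resume it if it was
 * suspended, stop the worker thread, clear queuedata so mmc_request_fn()
 * fails any late requests, and free the per-request buffers.
 */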
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}