/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

#define MMC_QUEUE_BOUNCESZ	65536
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);

		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
		}

		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			if (mq->new_request) {
				mq->new_request = false;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* No request: sleep until mmc_request_fn() wakes us */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	wake_up_process(mq->thread);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}
static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif
static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}
static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	/* one slot for the current and one for the previous request */
	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
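
/*
 * Illustrative sketch only, not part of this file: mmc_queue_suspend() and
 * mmc_queue_resume() are meant to bracket card/host power management so that
 * mmcqd is parked while no requests can be served.  The my_blk_suspend()/
 * my_blk_resume() wrappers and the "md" container are made-up names for the
 * example; in the kernel this is driven from the block driver's pm hooks.
 *
 *	static int my_blk_suspend(struct my_blk_data *md)
 *	{
 *		// stop the request queue and wait for in-flight requests
 *		mmc_queue_suspend(&md->queue);
 *		return 0;
 *	}
 *
 *	static int my_blk_resume(struct my_blk_data *md)
 *	{
 *		// a no-op if the queue was never suspended
 *		mmc_queue_resume(&md->queue);
 *		return 0;
 *	}
 */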
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
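
/*
 * Illustrative sketch only, not part of this file: the call order the block
 * driver follows for the bounce-buffer helpers on a single read/write
 * request.  "mq", "mqrq" and "brq" (= &mqrq->brq) are assumed to be set up
 * by the caller and command preparation is omitted; in the kernel this
 * sequence lives in mmc_blk_rw_rq_prep() and the request completion path.
 *
 *	// map the request; with a bounce buffer active this collapses the
 *	// transfer into the single segment mqrq->sg[0]
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *
 *	// writes: copy the payload into the bounce buffer before issuing
 *	mmc_queue_bounce_pre(mqrq);
 *
 *	mmc_wait_for_req(mq->card->host, &brq->mrq);
 *
 *	// reads: copy the device data back out of the bounce buffer
 *	mmc_queue_bounce_post(mqrq);
 */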