// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
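
/*
 * Segment count advertised to the block layer when host->can_dma_map_merge
 * is set, i.e. when the DMA layer can merge scatterlist entries; see
 * mmc_get_max_segments() and the can_dma_map_merge setup in mmc_init_queue().
 */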
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
        /* Allow only 1 DCMD at a time */
        return mq->in_flight[MMC_ISSUE_DCMD];
}
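
/*
 * Clear CQE busy flags that no longer apply: MMC_CQE_DCMD_BUSY is dropped
 * once no DCMD is in flight, and MMC_CQE_QUEUE_FULL is always cleared so
 * that dispatch can be retried.
 */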
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
        if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
                mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

        mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
        return host->caps2 & MMC_CAP2_CQE_DCMD;
}
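
/*
 * Classify a request for a CQE host: driver-private, discard and secure
 * erase requests are issued synchronously, a flush may be issued as a DCMD
 * if the host supports it, and everything else goes through the async path.
 */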
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
                                              struct request *req)
{
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                return MMC_ISSUE_SYNC;
        case REQ_OP_FLUSH:
                return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
        default:
                return MMC_ISSUE_ASYNC;
        }
}
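
/*
 * For CQE hosts defer to mmc_cqe_issue_type(); otherwise only reads and
 * writes are issued asynchronously and all other requests are synchronous.
 */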
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
        struct mmc_host *host = mq->card->host;

        if (mq->use_cqe)
                return mmc_cqe_issue_type(host, req);

        if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
                return MMC_ISSUE_ASYNC;

        return MMC_ISSUE_SYNC;
}
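
/*
 * Schedule the recovery work at most once; called with mq->lock held by
 * both the recovery notifier and the CQE timeout path.
 */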
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
        if (!mq->recovery_needed) {
                mq->recovery_needed = true;
                schedule_work(&mq->recovery_work);
        }
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
        struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                                                  brq.mrq);
        struct request *req = mmc_queue_req_to_req(mqrq);
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        unsigned long flags;

        spin_lock_irqsave(&mq->lock, flags);
        __mmc_cqe_recovery_notifier(mq);
        spin_unlock_irqrestore(&mq->lock, flags);
}
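
/*
 * Ask the host whether a CQE request really timed out.  If it did, kick off
 * recovery and let the block layer reset the timer; if it completed in the
 * meantime, finish it here.
 */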
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
        struct mmc_request *mrq = &mqrq->brq.mrq;
        struct mmc_queue *mq = req->q->queuedata;
        struct mmc_host *host = mq->card->host;
        enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
        bool recovery_needed = false;

        switch (issue_type) {
        case MMC_ISSUE_ASYNC:
        case MMC_ISSUE_DCMD:
                if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                        if (recovery_needed)
                                __mmc_cqe_recovery_notifier(mq);
                        return BLK_EH_RESET_TIMER;
                }
                /* Not timed out: the request has already completed, so finish it */
                blk_mq_complete_request(req);
                return BLK_EH_DONE;
        default:
                /* Timeout is handled by mmc core */
                return BLK_EH_RESET_TIMER;
        }
}
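
/*
 * blk-mq .timeout callback.  If recovery is already in progress, or the
 * host does not use a CQE, just reset the timer and let the mmc core deal
 * with the timeout; otherwise hand over to the CQE timeout handling above.
 */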
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
                                                 bool reserved)
{
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        unsigned long flags;
        enum blk_eh_timer_return ret;

        spin_lock_irqsave(&mq->lock, flags);

        if (mq->recovery_needed || !mq->use_cqe)
                ret = BLK_EH_RESET_TIMER;
        else
                ret = mmc_cqe_timed_out(req);

        spin_unlock_irqrestore(&mq->lock, flags);

        return ret;
}
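
/*
 * Recovery work: claim the card, run CQE or normal blk-mq recovery, then
 * clear the recovery flag and restart the hardware queues.
 */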
static void mmc_mq_recovery_handler(struct work_struct *work)
{
        struct mmc_queue *mq = container_of(work, struct mmc_queue,
                                            recovery_work);
        struct request_queue *q = mq->queue;

        mmc_get_card(mq->card, &mq->ctx);

        mq->in_recovery = true;

        if (mq->use_cqe)
                mmc_blk_cqe_recovery(mq);
        else
                mmc_blk_mq_recovery(mq);

        mq->in_recovery = false;

        spin_lock_irq(&mq->lock);
        mq->recovery_needed = false;
        spin_unlock_irq(&mq->lock);

        mmc_put_card(mq->card, &mq->ctx);

        blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
        if (sg)
                sg_init_table(sg, sg_len);

        return sg;
}
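
/*
 * Propagate the card's erase/trim capabilities to the block layer discard
 * limits; discard granularity comes from the card's preferred erase size.
 */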
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
        return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
                                         host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
                              gfp_t gfp)
{
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;

        mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
        if (!mq_rq->sg)
                return -ENOMEM;

        return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

        kfree(mq_rq->sg);
        mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
                               unsigned int hctx_idx, unsigned int numa_node)
{
        return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
                                unsigned int hctx_idx)
{
        struct mmc_queue *mq = set->driver_data;

        mmc_exit_request(mq->queue, req);
}
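
/*
 * blk-mq .queue_rq handler: classify the request, account it as in flight,
 * claim the host for the first outstanding request, and hand it to
 * mmc_blk_mq_issue_rq().  Dispatch is serialized via mq->busy because
 * parallel dispatch is not supported.
 */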
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct request_queue *q = req->q;
        struct mmc_queue *mq = q->queuedata;
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        enum mmc_issue_type issue_type;
        enum mmc_issued issued;
        bool get_card, cqe_retune_ok;
        blk_status_t ret;

        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
                return BLK_STS_IOERR;
        }

        issue_type = mmc_issue_type(mq, req);

        spin_lock_irq(&mq->lock);

        if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(&mq->lock);
                return BLK_STS_RESOURCE;
        }

        switch (issue_type) {
        case MMC_ISSUE_DCMD:
                if (mmc_cqe_dcmd_busy(mq)) {
                        mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
                        spin_unlock_irq(&mq->lock);
                        return BLK_STS_RESOURCE;
                }
                break;
        case MMC_ISSUE_ASYNC:
                break;
        default:
                /*
                 * Timeouts are handled by mmc core, and we don't have a host
                 * API to abort requests, so we can't handle the timeout anyway.
                 * However, when the timeout happens, blk_mq_complete_request()
                 * no longer works (to stop the request disappearing under us).
                 * To avoid racing with that, set a large timeout.
                 */
                req->timeout = 600 * HZ;
                break;
        }

        /* Parallel dispatch of requests is not supported at the moment */
        mq->busy = true;

        mq->in_flight[issue_type] += 1;
        get_card = (mmc_tot_in_flight(mq) == 1);
        cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

        spin_unlock_irq(&mq->lock);

        if (!(req->rq_flags & RQF_DONTPREP)) {
                req_to_mmc_queue_req(req)->retries = 0;
                req->rq_flags |= RQF_DONTPREP;
        }

        if (get_card)
                mmc_get_card(card, &mq->ctx);

        if (mq->use_cqe) {
                host->retune_now = host->need_retune && cqe_retune_ok &&
                                   !host->hold_retune;
        }

        blk_mq_start_request(req);

        issued = mmc_blk_mq_issue_rq(mq, req);

        switch (issued) {
        case MMC_REQ_BUSY:
                ret = BLK_STS_RESOURCE;
                break;
        case MMC_REQ_FAILED_TO_START:
                ret = BLK_STS_IOERR;
                break;
        default:
                ret = BLK_STS_OK;
                break;
        }

        if (issued != MMC_REQ_STARTED) {
                bool put_card = false;

                spin_lock_irq(&mq->lock);
                mq->in_flight[issue_type] -= 1;
                if (mmc_tot_in_flight(mq) == 0)
                        put_card = true;
                mq->busy = false;
                spin_unlock_irq(&mq->lock);
                if (put_card)
                        mmc_put_card(card, &mq->ctx);
        } else {
                WRITE_ONCE(mq->busy, false);
        }

        return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
        .queue_rq       = mmc_mq_queue_rq,
        .init_request   = mmc_mq_init_request,
        .exit_request   = mmc_mq_exit_request,
        .complete       = mmc_blk_mq_complete,
        .timeout        = mmc_mq_timed_out,
};
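
/*
 * Apply the host and card limits to the block queue: rotational/discard
 * flags, maximum sectors and segments, logical block size, and DMA segment
 * sizing, then set up the recovery and completion work items.
 */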
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned block_size = 512;

        blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

        if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
                blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
        blk_queue_max_hw_sectors(mq->queue,
                min(host->max_blk_count, host->max_req_size / 512));
        if (host->can_dma_map_merge)
                WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
                                                        mmc_dev(host)),
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

        if (mmc_card_mmc(card))
                block_size = card->ext_csd.data_sector_size;

        blk_queue_logical_block_size(mq->queue, block_size);
        /*
         * When blk_queue_can_use_dma_map_merging() succeeds it calls
         * blk_queue_virt_boundary(), in which case we must not also call
         * blk_queue_max_segment_size().
         */
        if (!host->can_dma_map_merge)
                blk_queue_max_segment_size(mq->queue,
                        round_down(host->max_seg_size, block_size));

        dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

        INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

        mutex_init(&mq->complete_lock);

        init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
        return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        int ret;

        mq->card = card;
        mq->use_cqe = host->cqe_enabled;

        spin_lock_init(&mq->lock);

        memset(&mq->tag_set, 0, sizeof(mq->tag_set));
        mq->tag_set.ops = &mmc_mq_ops;
        /*
         * The queue depth for CQE must match the hardware because the request
         * tag is used to index the hardware queue.
         */
        if (mq->use_cqe)
                mq->tag_set.queue_depth =
                        min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
        else
                mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
        mq->tag_set.numa_node = NUMA_NO_NODE;
        mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        mq->tag_set.nr_hw_queues = 1;
        mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
        mq->tag_set.driver_data = mq;

        /*
         * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
         * host->can_dma_map_merge must be set beforehand so that
         * mmc_get_max_segments() reports the right number of segments.
         */
        if (mmc_merge_capable(host) &&
            host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
            dma_get_merge_boundary(mmc_dev(host)))
                host->can_dma_map_merge = 1;
        else
                host->can_dma_map_merge = 0;

        ret = blk_mq_alloc_tag_set(&mq->tag_set);
        if (ret)
                return ret;

        mq->queue = blk_mq_init_queue(&mq->tag_set);
        if (IS_ERR(mq->queue)) {
                ret = PTR_ERR(mq->queue);
                goto free_tag_set;
        }

        if (mmc_host_is_spi(host) && host->use_spi_crc)
                mq->queue->backing_dev_info->capabilities |=
                        BDI_CAP_STABLE_WRITES;

        mq->queue->queuedata = mq;
        blk_queue_rq_timeout(mq->queue, 60 * HZ);

        mmc_setup_queue(mq, card);
        return 0;

free_tag_set:
        blk_mq_free_tag_set(&mq->tag_set);
        return ret;
}
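
/*
 * Stop new requests from being dispatched and make sure none are still in
 * flight before the queue is suspended.
 */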
void mmc_queue_suspend(struct mmc_queue *mq)
{
        blk_mq_quiesce_queue(mq->queue);

        /*
         * The host remains claimed while there are outstanding requests, so
         * simply claiming and releasing here ensures there are none.
         */
        mmc_claim_host(mq->card->host);
        mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
        blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;

        /*
         * The legacy code handled the possibility of being suspended,
         * so do that here too.
         */
        if (blk_queue_quiesced(q))
                blk_mq_unquiesce_queue(q);

        blk_cleanup_queue(q);
        blk_mq_free_tag_set(&mq->tag_set);

        /*
         * A request can be completed before the next request, potentially
         * leaving a complete_work with nothing to do. Such a work item might
         * still be queued at this point. Flush it.
         */
        flush_work(&mq->complete_work);

        mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        struct request *req = mmc_queue_req_to_req(mqrq);

        return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}