// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
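
/*
 * Advertised instead of host->max_segs when the DMA layer can merge
 * segments at map time (see mmc_get_max_segments()); an upper bound on
 * the sg table size rather than a hardware limit.
 */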
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}
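
/*
 * Clear cqe_busy flags once the condition that set them has passed:
 * MMC_CQE_DCMD_BUSY is dropped when the in-flight DCMD completes, and
 * MMC_CQE_QUEUE_FULL is always cleared so dispatch is retried.
 */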
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}
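
/*
 * Classify a request for dispatch. With a CQE in command queueing mode,
 * flushes may be issued as DCMDs and reads/writes are asynchronous, while
 * driver-private, discard and secure-erase requests are synchronous.
 * Otherwise only reads and writes are asynchronous.
 */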
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}
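
/*
 * Invoked (via mrq->recovery_notifier) when a request being processed by
 * the CQE needs recovery. The recovery_needed latch in the helper above
 * makes scheduling idempotent: recovery_work runs once however many
 * requests report errors.
 */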
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}
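
/*
 * For ASYNC and DCMD requests, ask the CQE driver whether the request is
 * still in flight: if so, reset the block layer timer (scheduling recovery
 * if the driver asked for it); if not, the request has already completed.
 */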
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
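
/*
 * Runs from the workqueue with the host claimed. Once recovery_needed is
 * cleared, dispatch (which returns BLK_STS_RESOURCE while it is set) can
 * resume, so the hardware queues are kicked at the end.
 */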
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}
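
/*
 * The scatterlist for each request is preallocated in .init_request(),
 * sized for the worst case reported by mmc_get_max_segments(), so the
 * issue path never has to allocate memory.
 */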
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}
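
/*
 * When map-time merging is usable, more segments than host->max_segs can
 * be advertised, as the DMA layer will combine them while mapping.
 */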
static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}
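
/*
 * blk-mq dispatch entry point. Under mq->lock it rejects dispatch during
 * recovery, enforces the single-DCMD and host-software-queue depth limits,
 * and updates the in-flight counters. mmc_get_card() is called when the
 * total in-flight count goes 0 -> 1, and the card is put again when the
 * count drops back to zero (here on issue failure, otherwise from the
 * completion path in block.c).
 */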
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For MMC host software queue, we only allow 2 requests in
		 * flight to avoid a long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}
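
/*
 * .complete and the issue helpers live in block.c; .init_request and
 * .exit_request manage the per-request preallocated scatterlists.
 */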
static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};
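
/*
 * Apply host and card limits to the request queue: non-rotational, no
 * entropy contribution, discard support when the card can erase, a bounce
 * limit for hosts without a DMA mask, and segment limits that depend on
 * whether DMA map-time merging is in use.
 */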
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * If blk_queue_can_use_dma_map_merging() succeeded, it has already
	 * set the virt boundary via blk_queue_virt_boundary(), so we must
	 * not also call blk_queue_max_segment_size().
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}
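
/* Whether the host controller has opted in to DMA map-time merging. */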
static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set before that, so that
	 * mmc_get_max_segments() returns the right max_segs.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
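
/*
 * Quiesce the queue so that no new requests are dispatched, then drain
 * what is already in flight by claiming and releasing the host. Paired
 * with mmc_queue_resume().
 */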
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}
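
/*
 * Called when the block device is removed: unquiesce if still suspended,
 * destroy the request queue and free the tag set, then flush any leftover
 * complete_work, since no new completion work can be queued at that point.
 */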
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}