// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "crypto.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
#define SD_DISCARD_TIMEOUT_MS	(250)
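
/*
 * Card-init frequencies (Hz), tried in descending order by mmc_rescan()
 * via mmc_rescan_try_freq() until a card responds.
 */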
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if ((cmd && cmd->error) || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))];
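	/* Truncate bytes_xfered to a random number of complete 512-byte sectors */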
	data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if (!mmc_op_tuning(cmd->opcode) &&
	    !host->retune_crc_disable &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	     (mrq->data && mrq->data->error == -EILSEQ) ||
	     (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->sbc->opcode,
				 mrq->sbc->error,
				 mrq->sbc->resp[0], mrq->sbc->resp[1],
				 mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_request_done);

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 * And bypass I/O abort, reset and bus suspend operations.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx. 500 ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}

static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}

static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
	unsigned int i, sz = 0;
	struct scatterlist *sg;

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;

		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;

		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}

int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	if (mrq->cmd && mrq->cmd->has_ext_addr)
		mmc_send_ext_addr(host, mrq->cmd->ext_addr);

	init_completion(&mrq->cmd_completion);

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	if (host->uhs2_sd_tran)
		mmc_uhs2_prepare_cmd(host, mrq);

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/**
 * mmc_cqe_start_req - Start a CQE request.
 * @host: MMC host to start the request
 * @mrq: request to start
 *
 * Start the request, re-tuning if needed and it is possible. Returns an error
 * code if the request fails to start or -EBUSY if CQE is busy.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/*
	 * CQE cannot process re-tuning commands. Caller must hold retuning
	 * while CQE is in use. Re-tuning can happen here only when CQE has no
	 * active requests i.e. this is the first. Note, re-tuning will call
	 * ->cqe_off().
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	if (host->uhs2_sd_tran)
		mmc_uhs2_prepare_cmd(host, mrq);

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);

/**
 * mmc_cqe_request_done - CQE has finished processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * CQE drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s:     %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);

/**
 * mmc_cqe_post_req - CQE post process of a completed MMC request
 * @host: MMC host
 * @mrq: MMC request to be processed
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
		host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);

/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT	1000

/*
 * mmc_cqe_recovery - Recover from CQE errors.
 * @host: MMC host to recover
 *
 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 * in eMMC, and discarding the queue in CQE. CQE must call
 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 * fails to discard its queue.
 */
int mmc_cqe_recovery(struct mmc_host *host)
{
	struct mmc_command cmd;
	int err;

	mmc_retune_hold_now(host);

	/*
	 * Recovery is expected seldom, if at all, but it reduces performance,
	 * so make sure it is not completely silent.
	 */
	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));

	host->cqe_ops->cqe_recovery_start(host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_CMDQ_TASK_MGMT;
	cmd.arg = 1; /* Discard entire queue */
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	host->cqe_ops->cqe_recovery_finish(host);
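
	/*
	 * If the task management command failed while recovery was still in
	 * progress, give it one more try now that recovery has finished.
	 */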
	if (err)
		err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	mmc_retune_release(host);

	return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);

/**
 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 * @host: MMC host
 * @mrq: MMC request
 *
 * mmc_is_req_done() is used with requests that have
 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 * starting a request and before waiting for it to complete. That is,
 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 * and before mmc_wait_for_req_done(). If it is called at other times the
 * result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. In the case of 'cap_cmd_during_tfr'
 * requests, the transfer is ongoing and the caller can issue further
 * commands that do not use the data lines, and then wait by calling
 * mmc_wait_for_req_done().
 * Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;
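
	/*
	 * Illustrative example: a card reporting TAAC = 1.5 ms gives a read
	 * timeout of 1.5 ms * 100 = 150 ms here, which the 100 ms read limit
	 * below then clamps.
	 */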

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/*
 * Allow claiming an already claimed host if the context is the same or there is
 * no context but the task is the same.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
	       (!ctx && task && host->claimer->task == task);
}

static inline void mmc_ctx_set_claimer(struct mmc_host *host,
				       struct mmc_ctx *ctx,
				       struct task_struct *task)
{
	if (!host->claimer) {
		if (ctx)
			host->claimer = ctx;
		else
			host->claimer = &host->default_ctx;
	}
	if (task)
		host->claimer->task = task;
}

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @ctx: context that claims the host or NULL in which case the default
 * context will be used
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non null and
 * dereferences a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
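
/*
 * Note: most callers use the mmc_claim_host() wrapper (see core.h), which
 * amounts to __mmc_claim_host(host, NULL, NULL), i.e. the default context
 * with no abort flag.
 */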

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
			pm_runtime_put_sync_suspend(mmc_dev(host));
		else
			pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		 "width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);
	if (!err) {
		mmc_retune_clear(host);
		mmc_retune_enable(host);
		return 0;
	}

	/* Only print error when we don't check for card removal */
	if (!host->detect_change) {
		pr_err("%s: tuning execution failed: %d\n",
		       mmc_hostname(host), err);
		mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
	}

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	    host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);

	mmc_crypto_set_initial_state(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
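	/*
	 * e.g. vdd = 3300 with low_bits = false: (3300 - 2000) / 100 + 8 = 21,
	 * i.e. ilog2(MMC_VDD_33_34); with low_bits = true, vdd is first
	 * decremented to 3299, giving bit 20, i.e. ilog2(MMC_VDD_32_33).
	 */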
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
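
/*
 * e.g. mmc_vddrange_to_ocrmask(3300, 3400) maps vdd_min to bit 20
 * (MMC_VDD_32_33) and vdd_max to bit 22 (MMC_VDD_34_35), so the returned
 * mask is MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */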

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
					     unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
			 "card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (!mmc_card_uhs2(host) && host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		/*
		 * The bit variable represents the highest voltage bit set in
		 * the OCR register.
		 * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
		 * we must shift the mask '3' with (bit - 1).
		 */
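		/*
		 * e.g. if bits 20-21 (3.2V-3.4V) are set, fls(ocr) is 22, so
		 * bit = 21 and the mask 3 << 20 keeps exactly bits 20 and 21.
		 */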
		ocr &= 3 << (bit - 1);
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}

int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
	u32 clock;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	return 0;
}

int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto power_cycle;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}

	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			 "power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	mmc_set_initial_signal_voltage(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	host->bus_ops = ops;
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	host->bus_ops = NULL;
}

void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
	/*
	 * Prevent system sleep for 5s to allow user space to consume the
	 * corresponding uevent. This is especially useful, when CD irq is used
	 * as a system wakeup, but doesn't hurt in other cases.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
		__pm_wakeup_event(host->ws, 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
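		/* sz is the card capacity in MiB (1 MiB = 2048 512-byte sectors) */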
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}

static bool is_trim_arg(unsigned int arg)
{
	return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
}

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.taac_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
		if (card->csd.taac_ns < 1000000)
			timeout_us = (card->csd.taac_ns * mult) / 1000;
		else
			timeout_us = (card->csd.taac_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	/*
	 * For DISCARD none of the below calculation applies.
	 * The busy timeout is 250 msec per discard command.
	 */
	if (arg == SD_DISCARD_ARG)
		return SD_DISCARD_TIMEOUT_MS;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, sector_t from,
			sector_t to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
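	/*
	 * e.g. with erase_shift = 10 (1024-sector erase groups), from = 1000
	 * and to = 1030 fall in erase groups 0 and 1, so qty = (1 - 0) + 1 = 2.
	 */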
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += (mmc_sector_div(to, card->erase_size) -
			mmc_sector_div(from, card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	if (mmc_card_ult_capacity(card)) {
		cmd.ext_addr = from >> 32;
		cmd.has_ext_addr = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	if (mmc_card_ult_capacity(card)) {
		cmd.ext_addr = to >> 32;
		cmd.has_ext_addr = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, polling
	 * shall be avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	/* Let's poll to find out when the erase operation completes. */
	err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);

out:
	mmc_retune_release(card->host);
	return err;
}

static unsigned int mmc_align_erase_size(struct mmc_card *card,
					 sector_t *from,
					 sector_t *to,
					 unsigned int nr)
{
	sector_t from_new = *from;
	unsigned int nr_new = nr, rem;

	/*
	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
		sector_t temp = from_new;

		from_new = round_up(temp, card->erase_size);
		rem = from_new - temp;

		if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;

		nr_new = round_down(nr_new, card->erase_size);
	} else {
		rem = mmc_sector_mod(from_new, card->erase_size);
		if (rem) {
			rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
		}

		rem = nr_new % card->erase_size;
		if (rem)
			nr_new -= rem;
	}

	if (nr_new == 0)
		return 0;

	*to = from_new + nr_new;
	*from = from_new;

	return nr_new;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem;
	sector_t to = from + nr;

	int err;

	if (!(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
		return -EOPNOTSUPP;

	if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if (mmc_card_mmc(card) && is_trim_arg(arg) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * if the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group, which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
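	/*
	 * Illustrative numbers: erase_size = 1024 and from = 1000 give
	 * rem = 24, so the first mmc_do_erase() covers sectors [1000, 1023]
	 * and the second resumes at the group boundary, sector 1024.
	 */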
	rem = card->erase_size - mmc_sector_mod(from, card->erase_size);
	if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

int mmc_can_erase(struct mmc_card *card)
{
	if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, sector_t from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (mmc_sector_mod(from, card->erase_size) || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not only use 'host->max_busy_timeout' as the limitation
	 * when deciding the max discard sectors. We should set a balance value
	 * to improve the erase speed, while not letting the timeout grow too
	 * long at the same time.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size 'host->max_busy_timeout' is, but if
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balance value. In cases when the 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
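	/*
	 * Roughly: the inner loop doubles the step x while the erase timeout
	 * for (qty + x) erase-groups still fits within max_busy_timeout (qty
	 * is always allowed to reach at least min_qty); the outer loop then
	 * accumulates the largest step found, until no further step fits.
	 */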
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard || max_discard == 0)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout ?
		 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

bool mmc_card_is_blockaddr(struct mmc_card *card)
{
	return card ? mmc_card_blockaddr(card) : false;
}
EXPORT_SYMBOL(mmc_card_is_blockaddr);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	mmc_pwrseq_reset(host);

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
		return;
	host->ops->card_hw_reset(host);
}

/**
 * mmc_hw_reset - reset the card in hardware
 * @card: card to be reset
 *
 * Hard reset the card. This function is only for upper layers, like the
 * block layer or card drivers. You cannot use it in host drivers (struct
 * mmc_card might be gone then).
 *
 * Return: 0 on success, -errno on failure
 */
int mmc_hw_reset(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	ret = host->bus_ops->hw_reset(host);
	if (ret < 0)
		pr_warn("%s: tried to HW reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_sw_reset(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	if (!host->bus_ops->sw_reset)
		return -EOPNOTSUPP;

	ret = host->bus_ops->sw_reset(host);
	if (ret)
		pr_warn("%s: tried to SW reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_sw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

	pr_debug("%s: %s: trying to init card at %u Hz\n",
		 mmc_hostname(host), __func__, host->f_init);

	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD)) {
		if (mmc_send_if_cond_pcie(host, host->ocr_avail))
			goto out;
		if (mmc_card_sd_express(host))
			return 0;
	}

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

out:
	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or the host requires polling to provide card
	 * detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
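
/*
 * Illustrative sketch only (an assumption, not part of the original file):
 * callers must hold the host claim while asking whether the card has gone
 * away (note the WARN_ON above). "example_card_gone" is a hypothetical
 * helper.
 */
static bool __maybe_unused example_card_gone(struct mmc_card *card)
{
	bool gone;

	mmc_claim_host(card->host);
	gone = mmc_detect_card_removed(card->host);
	mmc_release_host(card->host);
	return gone;
}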

int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
{
	unsigned int boot_sectors_num;

	if (!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA))
		return -EOPNOTSUPP;

	/* filter out unrelated cards */
	if (card->ext_csd.rev < 3 ||
	    !mmc_card_mmc(card) ||
	    !mmc_card_is_blockaddr(card) ||
	    mmc_card_is_removable(card->host))
		return -ENOENT;

	/*
	 * eMMC storage has two special boot partitions in addition to the
	 * main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main
	 * accesses, which means that the partition table addresses are
	 * shifted by the size of the boot partitions. In accordance with the
	 * eMMC specification, the boot partition size is calculated as
	 * follows:
	 *
	 *	boot partition size = 128K byte x BOOT_SIZE_MULT
	 *
	 * Calculate the number of sectors occupied by both boot partitions.
	 */
	boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
			   SZ_512 * MMC_NUM_BOOT_PARTITION;

	/* Defined by NVIDIA and used by Android devices. */
	*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;

	return 0;
}
EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
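
/*
 * Worked example (illustrative, values assumed): for a card reporting
 * BOOT_SIZE_MULT (raw_boot_mult) = 32, each boot partition is
 * 32 x 128 KiB = 4 MiB, i.e. 8192 sectors of 512 bytes. With
 * MMC_NUM_BOOT_PARTITION = 2 that gives boot_sectors_num = 16384, so on
 * a card of N user sectors the alternative GPT entry is looked up at
 * sector N - 16384 - 1.
 */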

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	/* Verify a registered card to be functional, else remove it. */
	if (host->bus_ops)
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/* If there still is a card present, stop here. */
	if (host->bus_ops != NULL)
		goto out;

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	/* If an SD express card is present, then leave it as is. */
	if (mmc_card_sd_express(host)) {
		mmc_release_host(host);
		goto out;
	}

	/*
	 * Ideally we should favor initialization of legacy SD cards and defer
	 * UHS-II enumeration. However, cards don't seem to reliably announce
	 * their support for UHS-II in the response to ACMD41 while the legacy
	 * SD interface is being initialized. Therefore, let's start with
	 * UHS-II for now.
	 */
	if (!mmc_attach_sd_uhs2(host)) {
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		unsigned int freq = freqs[i];

		if (freq > host->f_max) {
			if (i + 1 < ARRAY_SIZE(freqs))
				continue;
			freq = host->f_max;
		}
		if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
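
	/*
	 * Worked example (illustrative, values assumed): with the table
	 * freqs[] = { 400, 300, 200, 100 } kHz and a host whose f_min is
	 * 150 kHz, the loop above tries 400, 300 and 200 kHz, then clamps
	 * the last attempt to max(100 kHz, f_min) = 150 kHz and stops,
	 * because freqs[3] <= f_min.
	 */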

	/* A non-removable card should have been detected by now. */
	if (!mmc_card_is_removable(host) && !host->bus_ops)
		pr_info("%s: Failed to initialize a non-removable card\n",
			mmc_hostname(host));

	/*
	 * Ignore the command timeout errors observed during
	 * the card init as those are expected.
	 */
	host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	bool power_up = !(host->caps2 &
			  (MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_SD_UHS2));

	host->f_init = max(min(freqs[0], host->f_max), host->f_min);
	host->rescan_disable = 0;

	if (power_up) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}
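
/*
 * Illustrative note (a sketch of the assumed surrounding call flow, not
 * part of the original file): host controller drivers do not call
 * mmc_start_host()/mmc_stop_host() directly; they register with
 * mmc_add_host() and unregister with mmc_remove_host(), which invoke
 * these helpers. A hypothetical probe path might look like:
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = mmc_alloc_host(sizeof(struct example_priv),
 *						      &pdev->dev);
 *		if (!mmc)
 *			return -ENOMEM;
 *		// ... set mmc->ops, caps, f_min/f_max, ocr_avail ...
 *		return mmc_add_host(mmc);	// ends up in mmc_start_host()
 *	}
 */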

void __mmc_stop_host(struct mmc_host *host)
{
	if (host->slot.cd_irq >= 0) {
		mmc_gpio_set_cd_wake(host, false);
		disable_irq(host->slot.cd_irq);
	}

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
}

void mmc_stop_host(struct mmc_host *host)
{
	__mmc_stop_host(host);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	if (host->bus_ops) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		return;
	}

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_DESCRIPTION("MMC core driver");
MODULE_LICENSE("GPL");