Linux 4.19.133 — drivers/mmc/core/core.c
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
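/*
 * Illustrative note (not in the original source): as a module parameter of
 * the core, use_spi_crc can typically be changed at boot time via the kernel
 * command line, assuming the usual mmc_core module name:
 *
 *	mmc_core.use_spi_crc=0
 */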
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if ((cmd && cmd->error) || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
	    !host->retune_crc_disable &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	     (mrq->data && mrq->data->error == -EILSEQ) ||
	     (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->sbc->opcode,
				 mrq->sbc->error,
				 mrq->sbc->resp[0], mrq->sbc->resp[1],
				 mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_request_done);

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 * And bypass I/O abort, reset and bus suspend operations.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}

static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}

static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
	unsigned int i, sz = 0;
	struct scatterlist *sg;

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;

		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;

		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}

int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->cmd_completion);

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/**
 *	mmc_cqe_start_req - Start a CQE request.
 *	@host: MMC host to start the request
 *	@mrq: request to start
 *
 *	Start the request, re-tuning if needed and it is possible. Returns an
 *	error code if the request fails to start or -EBUSY if CQE is busy.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/*
	 * CQE cannot process re-tuning commands. Caller must hold retuning
	 * while CQE is in use.  Re-tuning can happen here only when CQE has no
	 * active requests i.e. this is the first.  Note, re-tuning will call
	 * ->cqe_off().
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);

/**
 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	CQE drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s:     %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);

/**
 *	mmc_cqe_post_req - CQE post process of a completed MMC request
 *	@host: MMC host
 *	@mrq: MMC request to be processed
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
		host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);

/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT	1000

/**
 *	mmc_cqe_recovery - Recover from CQE errors.
 *	@host: MMC host to recover
 *
 *	Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 *	in eMMC, and discarding the queue in CQE. CQE must call
 *	mmc_cqe_request_done() on all requests. An error is returned if the
 *	eMMC fails to discard its queue.
 */
int mmc_cqe_recovery(struct mmc_host *host)
{
	struct mmc_command cmd;
	int err;

	mmc_retune_hold_now(host);

	/*
	 * Recovery is expected seldom, if at all, but it reduces performance,
	 * so make sure it is not completely silent.
	 */
	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));

	host->cqe_ops->cqe_recovery_start(host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	mmc_wait_for_cmd(host, &cmd, 0);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_CMDQ_TASK_MGMT;
	cmd.arg = 1; /* Discard entire queue */
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	err = mmc_wait_for_cmd(host, &cmd, 0);

	host->cqe_ops->cqe_recovery_finish(host);

	mmc_retune_release(host);

	return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);

/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 *	starting a request and before waiting for it to complete. That is,
 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 *	and before mmc_wait_for_req_done(). If it is called at other times the
 *	result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);

/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 *	requests, the transfer is ongoing and the caller can issue further
 *	commands that do not use the data lines, and then wait by calling
 *	mmc_wait_for_req_done().
 *	Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
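/*
 * Illustrative sketch (not in the original source): a caller using
 * 'cap_cmd_during_tfr' starts the transfer, may issue non-data commands
 * while it is ongoing, and only then waits for the transfer itself:
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);		// returns once CMD line is free
 *	// ... issue commands that do not use the data lines ...
 *	mmc_wait_for_req_done(host, mrq);	// wait for the data transfer
 */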
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
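/*
 * Illustrative sketch (not in the original source): issuing a bare
 * SEND_STATUS (CMD13) with mmc_wait_for_cmd(), exactly as the polling loop
 * in mmc_do_erase() below does. The host must already be claimed:
 *
 *	struct mmc_command cmd = {};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);	// up to 3 retries
 */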
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
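/*
 * Worked example (illustrative, not in the original source): for an SD
 * write with taac_ns = 1000000 (1 ms), taac_clks = 0 and r2w_factor = 2,
 * mult = 100 << 2 = 400, so timeout_ns = 400 ms. That is below the 3 s
 * write limit, so the computed value is kept unchanged.
 */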
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
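/*
 * Example (illustrative): the rounding above takes sz = 13 to 16 and
 * leaves sz = 64 unchanged, i.e. it is equivalent to ALIGN(sz, 4).
 */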
/*
 * Allow claiming an already claimed host if the context is the same or there is
 * no context but the task is the same.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
	       (!ctx && task && host->claimer->task == task);
}

static inline void mmc_ctx_set_claimer(struct mmc_host *host,
				       struct mmc_ctx *ctx,
				       struct task_struct *task)
{
	if (!host->claimer) {
		if (ctx)
			host->claimer = ctx;
		else
			host->claimer = &host->default_ctx;
	}
	if (task)
		host->claimer->task = task;
}

/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@ctx: context that claims the host or NULL in which case the default
 *	context will be used
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences to a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
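/*
 * Illustrative usage (not in the original source): code that talks to a
 * card directly brackets the access with mmc_get_card()/mmc_put_card(),
 * which handle both runtime PM and host claiming:
 *
 *	mmc_get_card(card, NULL);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_put_card(card, NULL);
 */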
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
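/*
 * Worked example (illustrative): mmc_vddrange_to_ocrmask(3300, 3400)
 * maps vdd_max = 3400 to bit 22 (MMC_VDD_34_35, high bits preferred) and
 * vdd_min = 3300 to bit 20 (MMC_VDD_32_33, low bits preferred), giving
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, as the kerneldoc states.
 */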
#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" DT property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges) {
		pr_debug("%pOF: voltage-ranges unspecified\n", np);
		return 0;
	}
	if (!num_ranges) {
		pr_err("%pOF: voltage-ranges empty\n", np);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%pOF: voltage-range #%d is invalid\n",
				np, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
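/*
 * Illustrative device-tree snippet (not in the original source): the
 * "voltage-ranges" property holds <min max> pairs in mV, so a node
 * supporting 3.2-3.4 V and a fixed 1.8 V rail could look like:
 *
 *	mmc@... {
 *		voltage-ranges = <3200 3400>, <1800 1800>;
 *	};
 */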
#endif /* CONFIG_OF */

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
 * @vdd_bit:	OCR bit number
 * @min_uV:	minimum voltage value (uV)
 * @max_uV:	maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}
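/*
 * Worked example (illustrative): MMC_VDD_165_195 is bit 7, so for
 * vdd_bit = 20 (MMC_VDD_32_33), tmp = 13 and the function reports
 * min_uV = 1900000 + 13 * 100000 = 3200000 and max_uV = 3300000.
 */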
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}

/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were supplied
 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
 * SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage.  This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */

/**
 * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
 * @mmc: the host to regulate
 *
 * Returns 0 or errno. errno should be handled, it is either a critical error
 * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
 * regulators have been found because they all are optional. If you require
 * certain regulators, you need to check separately in your driver if they got
 * populated after calling this function.
 */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
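/*
 * Illustrative host-driver usage (not in the original source): probe code
 * typically calls mmc_regulator_get_supply() and only fails hard on a
 * non-zero return, checking the optional supplies itself afterwards:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;		// -EPROBE_DEFER or critical error
 *	if (IS_ERR(mmc->supply.vqmmc))
 *		dev_info(dev, "no vqmmc; UHS signaling unavailable\n");
 */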
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}

int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
	u32 clock;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	return 0;
}

int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}

	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design.  Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	mmc_set_initial_signal_voltage(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host.  Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
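/*
 * Worked example (illustrative): for a 4 GiB card, sz evaluates to the
 * card size in MiB (4096), so pref_erase is set to 4 MiB expressed in
 * 512-byte sectors: 4 * 1024 * 1024 / 512 = 8192, then rounded up to a
 * multiple of erase_size if needed.
 */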
1867 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1868 unsigned int arg, unsigned int qty)
1870 unsigned int erase_timeout;
1872 if (arg == MMC_DISCARD_ARG ||
1873 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1874 erase_timeout = card->ext_csd.trim_timeout;
1875 } else if (card->ext_csd.erase_group_def & 1) {
1876 /* High Capacity Erase Group Size uses HC timeouts */
1877 if (arg == MMC_TRIM_ARG)
1878 erase_timeout = card->ext_csd.trim_timeout;
1879 else
1880 erase_timeout = card->ext_csd.hc_erase_timeout;
1881 } else {
1882 /* CSD Erase Group Size uses write timeout */
1883 unsigned int mult = (10 << card->csd.r2w_factor);
1884 unsigned int timeout_clks = card->csd.taac_clks * mult;
1885 unsigned int timeout_us;
1887 /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
1888 if (card->csd.taac_ns < 1000000)
1889 timeout_us = (card->csd.taac_ns * mult) / 1000;
1890 else
1891 timeout_us = (card->csd.taac_ns / 1000) * mult;
1894 * ios.clock is only a target. The real clock rate might be
1895 * less but not that much less, so fudge it by multiplying by 2.
1897 timeout_clks <<= 1;
1898 timeout_us += (timeout_clks * 1000) /
1899 (card->host->ios.clock / 1000);
1901 erase_timeout = timeout_us / 1000;
1904 * Theoretically, the calculation could underflow so round up
1905 * to 1ms in that case.
1907 if (!erase_timeout)
1908 erase_timeout = 1;
1911 /* Multiplier for secure operations */
1912 if (arg & MMC_SECURE_ARGS) {
1913 if (arg == MMC_SECURE_ERASE_ARG)
1914 erase_timeout *= card->ext_csd.sec_erase_mult;
1915 else
1916 erase_timeout *= card->ext_csd.sec_trim_mult;
1919 erase_timeout *= qty;
1922 * Ensure at least a 1 second timeout for SPI as per
1923 * 'mmc_set_data_timeout()'
1925 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1926 erase_timeout = 1000;
1928 return erase_timeout;
1931 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1932 unsigned int arg,
1933 unsigned int qty)
1935 unsigned int erase_timeout;
1937 if (card->ssr.erase_timeout) {
1938 /* Erase timeout specified in SD Status Register (SSR) */
1939 erase_timeout = card->ssr.erase_timeout * qty +
1940 card->ssr.erase_offset;
1941 } else {
1943 * Erase timeout not specified in SD Status Register (SSR) so
1944 * use 250ms per write block.
1946 erase_timeout = 250 * qty;
1949 /* Must not be less than 1 second */
1950 if (erase_timeout < 1000)
1951 erase_timeout = 1000;
1953 return erase_timeout;
1956 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1957 unsigned int arg,
1958 unsigned int qty)
1960 if (mmc_card_sd(card))
1961 return mmc_sd_erase_timeout(card, arg, qty);
1962 else
1963 return mmc_mmc_erase_timeout(card, arg, qty);
1966 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1967 unsigned int to, unsigned int arg)
1969 struct mmc_command cmd = {};
1970 unsigned int qty = 0, busy_timeout = 0;
1971 bool use_r1b_resp = false;
1972 unsigned long timeout;
1973 int loop_udelay=64, udelay_max=32768;
1974 int err;
1976 mmc_retune_hold(card->host);
1979 * qty is used to calculate the erase timeout which depends on how many
1980 * erase groups (or allocation units in SD terminology) are affected.
1981 * We count erasing part of an erase group as one erase group.
1982 * For SD, the allocation units are always a power of 2. For MMC, the
1983 * erase group size is almost certainly also power of 2, but it does not
1984 * seem to insist on that in the JEDEC standard, so we fall back to
1985 * division in that case. SD may not specify an allocation unit size,
1986 * in which case the timeout is based on the number of write blocks.
1988 * Note that the timeout for secure trim 2 will only be correct if the
1989 * number of erase groups specified is the same as the total of all
1990 * preceding secure trim 1 commands. Since the power may have been
1991 * lost since the secure trim 1 commands occurred, it is generally
1992 * impossible to calculate the secure trim 2 timeout correctly.
1994 if (card->erase_shift)
1995 qty += ((to >> card->erase_shift) -
1996 (from >> card->erase_shift)) + 1;
1997 else if (mmc_card_sd(card))
1998 qty += to - from + 1;
1999 else
2000 qty += ((to / card->erase_size) -
2001 (from / card->erase_size)) + 1;
2003 if (!mmc_card_blockaddr(card)) {
2004 from <<= 9;
2005 to <<= 9;
2008 if (mmc_card_sd(card))
2009 cmd.opcode = SD_ERASE_WR_BLK_START;
2010 else
2011 cmd.opcode = MMC_ERASE_GROUP_START;
2012 cmd.arg = from;
2013 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2014 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2015 if (err) {
2016 pr_err("mmc_erase: group start error %d, "
2017 "status %#x\n", err, cmd.resp[0]);
2018 err = -EIO;
2019 goto out;
2022 memset(&cmd, 0, sizeof(struct mmc_command));
2023 if (mmc_card_sd(card))
2024 cmd.opcode = SD_ERASE_WR_BLK_END;
2025 else
2026 cmd.opcode = MMC_ERASE_GROUP_END;
2027 cmd.arg = to;
2028 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2029 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2030 if (err) {
2031 pr_err("mmc_erase: group end error %d, status %#x\n",
2032 err, cmd.resp[0]);
2033 err = -EIO;
2034 goto out;
2037 memset(&cmd, 0, sizeof(struct mmc_command));
2038 cmd.opcode = MMC_ERASE;
2039 cmd.arg = arg;
2040 busy_timeout = mmc_erase_timeout(card, arg, qty);
2042 * If the host controller supports busy signalling and the timeout for
2043 * the erase operation does not exceed the max_busy_timeout, we should
2044 * use R1B response. Or we need to prevent the host from doing hw busy
2045 * detection, which is done by converting to a R1 response instead.
2046 * Note, some hosts requires R1B, which also means they are on their own
2047 * when it comes to deal with the busy timeout.
2049 if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
2050 card->host->max_busy_timeout &&
2051 busy_timeout > card->host->max_busy_timeout) {
2052 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2053 } else {
2054 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2055 cmd.busy_timeout = busy_timeout;
2056 use_r1b_resp = true;
2059 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2060 if (err) {
2061 pr_err("mmc_erase: erase error %d, status %#x\n",
2062 err, cmd.resp[0]);
2063 err = -EIO;
2064 goto out;
2067 if (mmc_host_is_spi(card->host))
2068 goto out;
2071 * In case of when R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2072 * shall be avoided.
2074 if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2075 goto out;
2077 timeout = jiffies + msecs_to_jiffies(busy_timeout);
2078 do {
2079 memset(&cmd, 0, sizeof(struct mmc_command));
2080 cmd.opcode = MMC_SEND_STATUS;
2081 cmd.arg = card->rca << 16;
2082 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2083 /* Do not retry else we can't see errors */
2084 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2085 if (err || R1_STATUS(cmd.resp[0])) {
2086 pr_err("error %d requesting status %#x\n",
2087 err, cmd.resp[0]);
2088 err = -EIO;
2089 goto out;
2090 }
2092 /* Timeout if the device never becomes ready for data and
2093 * never leaves the program state.
2094 */
2095 if (time_after(jiffies, timeout)) {
2096 pr_err("%s: Card stuck in programming state! %s\n",
2097 mmc_hostname(card->host), __func__);
2098 err = -EIO;
2099 goto out;
2100 }
2101 if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
2102 R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
2103 break;
2105 usleep_range(loop_udelay, loop_udelay*2);
2106 if (loop_udelay < udelay_max)
2107 loop_udelay *= 2;
2108 } while (1);
2110 out:
2111 mmc_retune_release(card->host);
2112 return err;
2113 }
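/*
 * Note on the loop above: the status polling backs off exponentially,
 * doubling the sleep interval each round until it reaches udelay_max,
 * which keeps command traffic low during long erases while staying
 * responsive for short ones. The same pattern in isolation (a sketch;
 * done(), d and d_max are illustrative names, not from this file):
 *
 *	while (!done()) {
 *		usleep_range(d, d * 2);
 *		if (d < d_max)
 *			d *= 2;
 *	}
 */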
2115 static unsigned int mmc_align_erase_size(struct mmc_card *card,
2116 unsigned int *from,
2117 unsigned int *to,
2118 unsigned int nr)
2119 {
2120 unsigned int from_new = *from, nr_new = nr, rem;
2122 /*
2123 * When 'card->erase_size' is a power of 2, we can use round_up/down()
2124 * to align the erase size efficiently.
2125 */
2126 if (is_power_of_2(card->erase_size)) {
2127 unsigned int temp = from_new;
2129 from_new = round_up(temp, card->erase_size);
2130 rem = from_new - temp;
2132 if (nr_new > rem)
2133 nr_new -= rem;
2134 else
2135 return 0;
2137 nr_new = round_down(nr_new, card->erase_size);
2138 } else {
2139 rem = from_new % card->erase_size;
2140 if (rem) {
2141 rem = card->erase_size - rem;
2142 from_new += rem;
2143 if (nr_new > rem)
2144 nr_new -= rem;
2145 else
2146 return 0;
2147 }
2149 rem = nr_new % card->erase_size;
2150 if (rem)
2151 nr_new -= rem;
2152 }
2154 if (nr_new == 0)
2155 return 0;
2157 *to = from_new + nr_new;
2158 *from = from_new;
2160 return nr_new;
2161 }
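/*
 * A worked example of the alignment above (a sketch; the numbers are
 * illustrative, not from this file): with card->erase_size == 1024,
 * *from == 1000 and nr == 5000, the power-of-2 path gives
 * from_new = round_up(1000, 1024) = 1024 and rem = 24, then
 * nr_new = round_down(5000 - 24, 1024) = 4096; so *from becomes 1024,
 * *to becomes 5120 and the function returns 4096.
 */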
2163 /**
2164 * mmc_erase - erase sectors.
2165 * @card: card to erase
2166 * @from: first sector to erase
2167 * @nr: number of sectors to erase
2168 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2169 *
2170 * Caller must claim host before calling this function.
2171 */
2172 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2173 unsigned int arg)
2174 {
2175 unsigned int rem, to = from + nr;
2176 int err;
2178 if (!(card->host->caps & MMC_CAP_ERASE) ||
2179 !(card->csd.cmdclass & CCC_ERASE))
2180 return -EOPNOTSUPP;
2182 if (!card->erase_size)
2183 return -EOPNOTSUPP;
2185 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2186 return -EOPNOTSUPP;
2188 if ((arg & MMC_SECURE_ARGS) &&
2189 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2190 return -EOPNOTSUPP;
2192 if ((arg & MMC_TRIM_ARGS) &&
2193 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2194 return -EOPNOTSUPP;
2196 if (arg == MMC_SECURE_ERASE_ARG) {
2197 if (from % card->erase_size || nr % card->erase_size)
2198 return -EINVAL;
2199 }
2201 if (arg == MMC_ERASE_ARG)
2202 nr = mmc_align_erase_size(card, &from, &to, nr);
2204 if (nr == 0)
2205 return 0;
2207 if (to <= from)
2208 return -EINVAL;
2210 /* 'from' and 'to' are inclusive */
2211 to -= 1;
2213 /*
2214 * Special case where only one erase-group fits in the timeout budget:
2215 * If the region crosses an erase-group boundary in this particular
2216 * case, we will be trimming more than one erase-group, which does not
2217 * fit in the timeout budget of the controller, so we need to split it
2218 * and call mmc_do_erase() twice if necessary. This special case is
2219 * identified by the card->eg_boundary flag.
2220 */
2221 rem = card->erase_size - (from % card->erase_size);
2222 if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2223 err = mmc_do_erase(card, from, from + rem - 1, arg);
2224 from += rem;
2225 if ((err) || (to <= from))
2226 return err;
2227 }
2229 return mmc_do_erase(card, from, to, arg);
2230 }
2231 EXPORT_SYMBOL(mmc_erase);
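/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * caller such as a block driver picks the least destructive argument
 * the card supports and erases a range; card, from and nr are assumed
 * to come from the caller's context, and the host must be claimed as
 * the kernel-doc above requires.
 *
 *	unsigned int arg = MMC_ERASE_ARG;
 *
 *	if (mmc_can_discard(card))
 *		arg = MMC_DISCARD_ARG;
 *	else if (mmc_can_trim(card))
 *		arg = MMC_TRIM_ARG;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, from, nr, arg);
 *	mmc_release_host(card->host);
 */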
2233 int mmc_can_erase(struct mmc_card *card)
2234 {
2235 if ((card->host->caps & MMC_CAP_ERASE) &&
2236 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2237 return 1;
2238 return 0;
2239 }
2240 EXPORT_SYMBOL(mmc_can_erase);
2242 int mmc_can_trim(struct mmc_card *card)
2243 {
2244 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2245 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2246 return 1;
2247 return 0;
2248 }
2249 EXPORT_SYMBOL(mmc_can_trim);
2251 int mmc_can_discard(struct mmc_card *card)
2252 {
2253 /*
2254 * As there's no way to detect the discard support bit at v4.5,
2255 * use the s/w feature support field.
2256 */
2257 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2258 return 1;
2259 return 0;
2260 }
2261 EXPORT_SYMBOL(mmc_can_discard);
2263 int mmc_can_sanitize(struct mmc_card *card)
2264 {
2265 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2266 return 0;
2267 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2268 return 1;
2269 return 0;
2270 }
2271 EXPORT_SYMBOL(mmc_can_sanitize);
2273 int mmc_can_secure_erase_trim(struct mmc_card *card)
2274 {
2275 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2276 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2277 return 1;
2278 return 0;
2279 }
2280 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
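/*
 * A sketch of how these predicates are typically combined (illustrative;
 * it mirrors what a block driver might do and is not code from this
 * file): secure variants are only attempted when the card advertises
 * them, preferring secure trim over secure erase when trim is available.
 *
 *	if (mmc_can_secure_erase_trim(card))
 *		arg = mmc_can_trim(card) ? MMC_SECURE_TRIM1_ARG :
 *					   MMC_SECURE_ERASE_ARG;
 */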
2282 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2283 unsigned int nr)
2284 {
2285 if (!card->erase_size)
2286 return 0;
2287 if (from % card->erase_size || nr % card->erase_size)
2288 return 0;
2289 return 1;
2290 }
2291 EXPORT_SYMBOL(mmc_erase_group_aligned);
2293 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2294 unsigned int arg)
2295 {
2296 struct mmc_host *host = card->host;
2297 unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2298 unsigned int last_timeout = 0;
2299 unsigned int max_busy_timeout = host->max_busy_timeout ?
2300 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2302 if (card->erase_shift) {
2303 max_qty = UINT_MAX >> card->erase_shift;
2304 min_qty = card->pref_erase >> card->erase_shift;
2305 } else if (mmc_card_sd(card)) {
2306 max_qty = UINT_MAX;
2307 min_qty = card->pref_erase;
2308 } else {
2309 max_qty = UINT_MAX / card->erase_size;
2310 min_qty = card->pref_erase / card->erase_size;
2311 }
2313 /*
2314 * We should not only use 'host->max_busy_timeout' as the limit
2315 * when deciding the max discard sectors. We should also set a
2316 * balanced value to improve the erase speed without incurring
2317 * an overly long timeout.
2318 *
2319 * Here we set 'card->pref_erase' as the minimal discard sectors no
2320 * matter what size of 'host->max_busy_timeout', but if the
2321 * 'host->max_busy_timeout' is large enough for more discard sectors,
2322 * then we can continue to increase the max discard sectors until we
2323 * get a balanced value. In cases when the 'host->max_busy_timeout'
2324 * isn't specified, use the default max erase timeout.
2325 */
2326 do {
2327 y = 0;
2328 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2329 timeout = mmc_erase_timeout(card, arg, qty + x);
2331 if (qty + x > min_qty && timeout > max_busy_timeout)
2332 break;
2334 if (timeout < last_timeout)
2335 break;
2336 last_timeout = timeout;
2337 y = x;
2338 }
2339 qty += y;
2340 } while (y);
2342 if (!qty)
2343 return 0;
2345 /*
2346 * When specifying a sector range to trim, chances are we might cross
2347 * an erase-group boundary even if the amount of sectors is less than
2348 * one erase-group.
2349 * If we can only fit one erase-group in the controller timeout budget,
2350 * we have to care that erase-group boundaries are not crossed by a
2351 * single trim operation. We flag that special case with "eg_boundary".
2352 * In all other cases we can just decrement qty and pretend that we
2353 * always touch (qty + 1) erase-groups as a simple optimization.
2354 */
2355 if (qty == 1)
2356 card->eg_boundary = 1;
2357 else
2358 qty--;
2360 /* Convert qty to sectors */
2361 if (card->erase_shift)
2362 max_discard = qty << card->erase_shift;
2363 else if (mmc_card_sd(card))
2364 max_discard = qty + 1;
2365 else
2366 max_discard = qty * card->erase_size;
2368 return max_discard;
2369 }
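/*
 * Illustrative numbers (a sketch, not taken from this file): if the
 * timeout budget fits qty == 4 erase-groups of 1024 sectors each, the
 * code above returns 3 * 1024 sectors, so that even a request that
 * straddles group boundaries touches at most (3 + 1) erase-groups and
 * still fits the budget. Only when qty == 1 is eg_boundary set and the
 * boundary handled explicitly by mmc_erase().
 */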
2371 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2372 {
2373 struct mmc_host *host = card->host;
2374 unsigned int max_discard, max_trim;
2376 /*
2377 * Without erase_group_def set, MMC erase timeout depends on clock
2378 * frequency which can change. In that case, the best choice is
2379 * just the preferred erase size.
2380 */
2381 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2382 return card->pref_erase;
2384 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2385 if (mmc_can_trim(card)) {
2386 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2387 if (max_trim < max_discard || max_discard == 0)
2388 max_discard = max_trim;
2389 } else if (max_discard < card->erase_size) {
2390 max_discard = 0;
2391 }
2392 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2393 mmc_hostname(host), max_discard, host->max_busy_timeout ?
2394 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2395 return max_discard;
2396 }
2397 EXPORT_SYMBOL(mmc_calc_max_discard);
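/*
 * A minimal usage sketch (illustrative; request-queue plumbing is
 * abbreviated and 'queue' is assumed from the caller's context): the
 * block layer discard limit is typically derived from this value.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(queue, max_discard);
 */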
2399 bool mmc_card_is_blockaddr(struct mmc_card *card)
2400 {
2401 return card ? mmc_card_blockaddr(card) : false;
2402 }
2403 EXPORT_SYMBOL(mmc_card_is_blockaddr);
2405 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2406 {
2407 struct mmc_command cmd = {};
2409 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2410 mmc_card_hs400(card) || mmc_card_hs400es(card))
2411 return 0;
2413 cmd.opcode = MMC_SET_BLOCKLEN;
2414 cmd.arg = blocklen;
2415 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2416 return mmc_wait_for_cmd(card->host, &cmd, 5);
2417 }
2418 EXPORT_SYMBOL(mmc_set_blocklen);
2420 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2421 bool is_rel_write)
2422 {
2423 struct mmc_command cmd = {};
2425 cmd.opcode = MMC_SET_BLOCK_COUNT;
2426 cmd.arg = blockcount & 0x0000FFFF;
2427 if (is_rel_write)
2428 cmd.arg |= 1 << 31;
2429 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2430 return mmc_wait_for_cmd(card->host, &cmd, 5);
2431 }
2432 EXPORT_SYMBOL(mmc_set_blockcount);
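/*
 * A minimal usage sketch (illustrative only): a driver issuing a
 * pre-defined multiple-block transfer sends CMD23 first; passing true
 * marks the transfer as a reliable write. 'nr_blocks' is assumed from
 * the caller's context, and the follow-up data command (e.g. CMD25
 * WRITE_MULTIPLE_BLOCK) is omitted here.
 *
 *	err = mmc_set_blockcount(card, nr_blocks, true);
 *	if (err)
 *		return err;
 */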
2434 static void mmc_hw_reset_for_init(struct mmc_host *host)
2435 {
2436 mmc_pwrseq_reset(host);
2438 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2439 return;
2440 host->ops->hw_reset(host);
2441 }
2443 int mmc_hw_reset(struct mmc_host *host)
2444 {
2445 int ret;
2447 if (!host->card)
2448 return -EINVAL;
2450 mmc_bus_get(host);
2451 if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
2452 mmc_bus_put(host);
2453 return -EOPNOTSUPP;
2454 }
2456 ret = host->bus_ops->hw_reset(host);
2457 mmc_bus_put(host);
2459 if (ret)
2460 pr_warn("%s: tried to HW reset card, got error %d\n",
2461 mmc_hostname(host), ret);
2463 return ret;
2464 }
2465 EXPORT_SYMBOL(mmc_hw_reset);
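/*
 * A minimal usage sketch (illustrative only): request-path recovery
 * code that has exhausted softer retries can attempt a full card reset
 * and re-initialization through the bus ops.
 *
 *	err = mmc_hw_reset(host);
 *	if (err && err != -EOPNOTSUPP)
 *		pr_warn("%s: hw reset failed\n", mmc_hostname(host));
 */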
2467 int mmc_sw_reset(struct mmc_host *host)
2468 {
2469 int ret;
2471 if (!host->card)
2472 return -EINVAL;
2474 mmc_bus_get(host);
2475 if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
2476 mmc_bus_put(host);
2477 return -EOPNOTSUPP;
2478 }
2480 ret = host->bus_ops->sw_reset(host);
2481 mmc_bus_put(host);
2483 if (ret)
2484 pr_warn("%s: tried to SW reset card, got error %d\n",
2485 mmc_hostname(host), ret);
2487 return ret;
2488 }
2489 EXPORT_SYMBOL(mmc_sw_reset);
2491 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2492 {
2493 host->f_init = freq;
2495 pr_debug("%s: %s: trying to init card at %u Hz\n",
2496 mmc_hostname(host), __func__, host->f_init);
2498 mmc_power_up(host, host->ocr_avail);
2500 /*
2501 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2502 * do a hardware reset if possible.
2503 */
2504 mmc_hw_reset_for_init(host);
2506 /*
2507 * sdio_reset sends CMD52 to reset the card. Since we do not know
2508 * if the card is being re-initialized, just send it. CMD52
2509 * should be ignored by SD/eMMC cards.
2510 * Skip it if we already know that we do not support SDIO commands.
2511 */
2512 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2513 sdio_reset(host);
2515 mmc_go_idle(host);
2517 if (!(host->caps2 & MMC_CAP2_NO_SD))
2518 mmc_send_if_cond(host, host->ocr_avail);
2520 /* Order's important: probe SDIO, then SD, then MMC */
2521 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2522 if (!mmc_attach_sdio(host))
2523 return 0;
2525 if (!(host->caps2 & MMC_CAP2_NO_SD))
2526 if (!mmc_attach_sd(host))
2527 return 0;
2529 if (!(host->caps2 & MMC_CAP2_NO_MMC))
2530 if (!mmc_attach_mmc(host))
2531 return 0;
2533 mmc_power_off(host);
2534 return -EIO;
2535 }
2537 int _mmc_detect_card_removed(struct mmc_host *host)
2538 {
2539 int ret;
2541 if (!host->card || mmc_card_removed(host->card))
2542 return 1;
2544 ret = host->bus_ops->alive(host);
2546 /*
2547 * Card detect status and alive check may be out of sync if card is
2548 * removed slowly, when card detect switch changes while card/slot
2549 * pads are still contacted in hardware (refer to "SD Card Mechanical
2550 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2551 * detect work 200ms later for this case.
2552 */
2553 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2554 mmc_detect_change(host, msecs_to_jiffies(200));
2555 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2556 }
2558 if (ret) {
2559 mmc_card_set_removed(host->card);
2560 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2561 }
2563 return ret;
2564 }
2566 int mmc_detect_card_removed(struct mmc_host *host)
2567 {
2568 struct mmc_card *card = host->card;
2569 int ret;
2571 WARN_ON(!host->claimed);
2573 if (!card)
2574 return 1;
2576 if (!mmc_card_is_removable(host))
2577 return 0;
2579 ret = mmc_card_removed(card);
2580 /*
2581 * The card will be considered unchanged unless we have been asked to
2582 * detect a change or host requires polling to provide card detection.
2583 */
2584 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2585 return ret;
2587 host->detect_change = 0;
2588 if (!ret) {
2589 ret = _mmc_detect_card_removed(host);
2590 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2591 /*
2592 * Schedule a detect work as soon as possible to let a
2593 * rescan handle the card removal.
2594 */
2595 cancel_delayed_work(&host->detect);
2596 _mmc_detect_change(host, 0, false);
2597 }
2598 }
2600 return ret;
2601 }
2602 EXPORT_SYMBOL(mmc_detect_card_removed);
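/*
 * A minimal usage sketch (illustrative only): request-path code checks
 * for removal early and gives up on the I/O if the card is gone. The
 * host must be claimed, as the WARN_ON above enforces.
 *
 *	mmc_claim_host(host);
 *	if (mmc_detect_card_removed(host))
 *		err = -ENOMEDIUM;
 *	mmc_release_host(host);
 */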
2604 void mmc_rescan(struct work_struct *work)
2605 {
2606 struct mmc_host *host =
2607 container_of(work, struct mmc_host, detect.work);
2608 int i;
2610 if (host->rescan_disable)
2611 return;
2613 /* If there is a non-removable card registered, only scan once */
2614 if (!mmc_card_is_removable(host) && host->rescan_entered)
2615 return;
2616 host->rescan_entered = 1;
2618 if (host->trigger_card_event && host->ops->card_event) {
2619 mmc_claim_host(host);
2620 host->ops->card_event(host);
2621 mmc_release_host(host);
2622 host->trigger_card_event = false;
2623 }
2625 mmc_bus_get(host);
2627 /*
2628 * If there is a _removable_ card registered, check whether it is
2629 * still present.
2630 */
2631 if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2632 host->bus_ops->detect(host);
2634 host->detect_change = 0;
2636 /*
2637 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2638 * the card is no longer present.
2639 */
2640 mmc_bus_put(host);
2641 mmc_bus_get(host);
2643 /* if there still is a card present, stop here */
2644 if (host->bus_ops != NULL) {
2645 mmc_bus_put(host);
2646 goto out;
2647 }
2649 /*
2650 * Only we can add a new handler, so it's safe to
2651 * release the lock here.
2652 */
2653 mmc_bus_put(host);
2655 mmc_claim_host(host);
2656 if (mmc_card_is_removable(host) && host->ops->get_cd &&
2657 host->ops->get_cd(host) == 0) {
2658 mmc_power_off(host);
2659 mmc_release_host(host);
2660 goto out;
2661 }
2663 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2664 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2665 break;
2666 if (freqs[i] <= host->f_min)
2667 break;
2668 }
2669 mmc_release_host(host);
2671 out:
2672 if (host->caps & MMC_CAP_NEEDS_POLL)
2673 mmc_schedule_delayed_work(&host->detect, HZ);
2674 }
2676 void mmc_start_host(struct mmc_host *host)
2677 {
2678 host->f_init = max(freqs[0], host->f_min);
2679 host->rescan_disable = 0;
2680 host->ios.power_mode = MMC_POWER_UNDEFINED;
2682 if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2683 mmc_claim_host(host);
2684 mmc_power_up(host, host->ocr_avail);
2685 mmc_release_host(host);
2686 }
2688 mmc_gpiod_request_cd_irq(host);
2689 _mmc_detect_change(host, 0, false);
2690 }
2692 void mmc_stop_host(struct mmc_host *host)
2693 {
2694 if (host->slot.cd_irq >= 0) {
2695 mmc_gpio_set_cd_wake(host, false);
2696 disable_irq(host->slot.cd_irq);
2697 }
2699 host->rescan_disable = 1;
2700 cancel_delayed_work_sync(&host->detect);
2702 /* clear pm flags now and let card drivers set them as needed */
2703 host->pm_flags = 0;
2705 mmc_bus_get(host);
2706 if (host->bus_ops && !host->bus_dead) {
2707 /* Calling bus_ops->remove() with a claimed host can deadlock */
2708 host->bus_ops->remove(host);
2709 mmc_claim_host(host);
2710 mmc_detach_bus(host);
2711 mmc_power_off(host);
2712 mmc_release_host(host);
2713 mmc_bus_put(host);
2714 return;
2715 }
2716 mmc_bus_put(host);
2718 mmc_claim_host(host);
2719 mmc_power_off(host);
2720 mmc_release_host(host);
2721 }
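/*
 * A minimal lifecycle sketch (illustrative; allocation and error
 * handling omitted, 'foo' is a hypothetical driver): host controller
 * drivers do not call these directly; mmc_add_host() invokes
 * mmc_start_host() and mmc_remove_host() invokes mmc_stop_host().
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *
 *		mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
 *		...
 *		return mmc_add_host(mmc);
 *	}
 */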
2723 #ifdef CONFIG_PM_SLEEP
2724 /* Do the card removal on suspend if card is assumed removable.
2725 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
2726 * to sync the card.
2727 */
2728 static int mmc_pm_notify(struct notifier_block *notify_block,
2729 unsigned long mode, void *unused)
2730 {
2731 struct mmc_host *host = container_of(
2732 notify_block, struct mmc_host, pm_notify);
2733 unsigned long flags;
2734 int err = 0;
2736 switch (mode) {
2737 case PM_HIBERNATION_PREPARE:
2738 case PM_SUSPEND_PREPARE:
2739 case PM_RESTORE_PREPARE:
2740 spin_lock_irqsave(&host->lock, flags);
2741 host->rescan_disable = 1;
2742 spin_unlock_irqrestore(&host->lock, flags);
2743 cancel_delayed_work_sync(&host->detect);
2745 if (!host->bus_ops)
2746 break;
2748 /* Validate prerequisites for suspend */
2749 if (host->bus_ops->pre_suspend)
2750 err = host->bus_ops->pre_suspend(host);
2751 if (!err)
2752 break;
2754 if (!mmc_card_is_removable(host)) {
2755 dev_warn(mmc_dev(host),
2756 "pre_suspend failed for non-removable host: %d\n",
2757 err);
2758 /* Avoid removing non-removable hosts */
2759 break;
2760 }
2762 /* Calling bus_ops->remove() with a claimed host can deadlock */
2763 host->bus_ops->remove(host);
2764 mmc_claim_host(host);
2765 mmc_detach_bus(host);
2766 mmc_power_off(host);
2767 mmc_release_host(host);
2768 host->pm_flags = 0;
2769 break;
2771 case PM_POST_SUSPEND:
2772 case PM_POST_HIBERNATION:
2773 case PM_POST_RESTORE:
2775 spin_lock_irqsave(&host->lock, flags);
2776 host->rescan_disable = 0;
2777 spin_unlock_irqrestore(&host->lock, flags);
2778 _mmc_detect_change(host, 0, false);
2780 }
2782 return 0;
2783 }
2785 void mmc_register_pm_notifier(struct mmc_host *host)
2786 {
2787 host->pm_notify.notifier_call = mmc_pm_notify;
2788 register_pm_notifier(&host->pm_notify);
2789 }
2791 void mmc_unregister_pm_notifier(struct mmc_host *host)
2792 {
2793 unregister_pm_notifier(&host->pm_notify);
2794 }
2795 #endif
2797 static int __init mmc_init(void)
2798 {
2799 int ret;
2801 ret = mmc_register_bus();
2802 if (ret)
2803 return ret;
2805 ret = mmc_register_host_class();
2806 if (ret)
2807 goto unregister_bus;
2809 ret = sdio_register_bus();
2810 if (ret)
2811 goto unregister_host_class;
2813 return 0;
2815 unregister_host_class:
2816 mmc_unregister_host_class();
2817 unregister_bus:
2818 mmc_unregister_bus();
2819 return ret;
2820 }
2822 static void __exit mmc_exit(void)
2823 {
2824 sdio_unregister_bus();
2825 mmc_unregister_host_class();
2826 mmc_unregister_bus();
2827 }
2829 subsys_initcall(mmc_init);
2830 module_exit(mmc_exit);
2832 MODULE_LICENSE("GPL");