/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
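
/*
 * Note (illustrative, not part of the original file): should_fail() above
 * comes from the generic fault-injection framework (CONFIG_FAULT_INJECTION).
 * When CONFIG_FAIL_MMC_REQUEST is enabled, the injection rate is typically
 * tuned at run time through the debugfs attributes of the fail_mmc_request
 * group (for example "probability" and "times"); the exact debugfs path
 * depends on the kernel configuration.
 */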
/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	mmc_start_request(host, mrq);
}

static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries)
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let
 * the host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for its completion,
 * then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		if (err) {
			/* post process the completed failed request */
			mmc_post_req(host, host->areq->mrq, 0);
			if (areq)
				/*
				 * Cancel the new prepared request, because
				 * it can't run until the failed
				 * request has been properly handled.
				 */
				mmc_post_req(host, areq->mrq, -EINVAL);

			host->areq = NULL;
			goto out;
		}
	}

	if (areq)
		__mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	host->areq = areq;
 out:
	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
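
/*
 * Usage sketch (illustrative only, not part of the original file): a block
 * driver typically double-buffers requests so that preparation (e.g. DMA
 * mapping) of the next transfer overlaps the current one, roughly:
 *
 *	prev = mmc_start_req(host, &next->areq, &err);	// start next, reap prev
 *	if (prev)
 *		finish_previous_transfer(prev);		// hypothetical helper
 */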
/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt and check the card status
 * until it is out of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;
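
	/*
	 * Worked example (illustrative, not from the original source): for a
	 * typical SD card with TAAC = 1.5 ms and NSAC = 0, a read gets
	 * mult = 100, so timeout_ns = 1,500,000 * 100 = 150 ms. A write with
	 * R2W_FACTOR = 2 additionally shifts mult left by 2 (x4), giving
	 * 600 ms before the SD write limit below (nominally 250 ms, 300 ms
	 * in practice) is applied.
	 */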

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The limit is really 250 ms, but that is
			 * insufficient for some crappy cards.
			 */
			limit_us = 300000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;
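
	/*
	 * Example (illustrative): sz = 13 is padded to 16, while a size that
	 * is already a multiple of 4 (e.g. 512) is returned unchanged.
	 */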

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
 * mmc_host_enable - enable a host.
 * @host: mmc host to enable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_enable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (host->nesting_cnt++)
		return 0;

	cancel_delayed_work_sync(&host->disable);

	if (host->enabled)
		return 0;

	if (host->ops->enable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->enable(host);
		host->en_dis_recurs = 0;

		if (err) {
			pr_debug("%s: enable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
	}
	host->enabled = 1;
	return 0;
}
EXPORT_SYMBOL(mmc_host_enable);
static int mmc_host_do_disable(struct mmc_host *host, int lazy)
{
	if (host->ops->disable) {
		int err;

		host->en_dis_recurs = 1;
		err = host->ops->disable(host, lazy);
		host->en_dis_recurs = 0;

		if (err < 0) {
			pr_debug("%s: disable error %d\n",
				 mmc_hostname(host), err);
			return err;
		}
		if (err > 0) {
			unsigned long delay = msecs_to_jiffies(err);

			mmc_schedule_delayed_work(&host->disable, delay);
		}
	}
	host->enabled = 0;
	return 0;
}

/**
 * mmc_host_disable - disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_disable(struct mmc_host *host)
{
	int err;

	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	err = mmc_host_do_disable(host, 0);
	return err;
}
EXPORT_SYMBOL(mmc_host_disable);
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-null and
 * dereferences to a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (!stop)
		mmc_host_enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
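
/*
 * Usage sketch (illustrative only): callers normally go through the
 * mmc_claim_host()/mmc_release_host() pair around a sequence of commands,
 * e.g.:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 *
 * Claims nest for the same task, so a driver already holding the host may
 * claim it again without deadlocking.
 */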
/**
 * mmc_try_claim_host - try exclusively to claim a host
 * @host: mmc host to claim
 *
 * Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
 * mmc_do_release_host - release a claimed host
 * @host: mmc host to release
 *
 * If you successfully claimed a host, this function will
 * release it again.
 */
void mmc_do_release_host(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_do_release_host);
void mmc_host_deeper_disable(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, disable.work);

	/* If the host is claimed then we do not want to disable it anymore */
	if (!mmc_try_claim_host(host))
		return;
	mmc_host_do_disable(host, 1);
	mmc_do_release_host(host);
}
/**
 * mmc_host_lazy_disable - lazily disable a host.
 * @host: mmc host to disable
 *
 * Hosts that support power saving can use the 'enable' and 'disable'
 * methods to exit and enter power saving states. For more information
 * see comments for struct mmc_host_ops.
 */
int mmc_host_lazy_disable(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_DISABLE))
		return 0;

	if (host->en_dis_recurs)
		return 0;

	if (--host->nesting_cnt)
		return 0;

	if (!host->enabled)
		return 0;

	if (host->disable_delay) {
		mmc_schedule_delayed_work(&host->disable,
				msecs_to_jiffies(host->disable_delay));
		return 0;
	} else
		return mmc_host_do_disable(host, 1);
}
EXPORT_SYMBOL(mmc_host_lazy_disable);
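
/*
 * Note (illustrative): the enable/disable pair above is reference counted via
 * host->nesting_cnt; only the final disable actually powers the controller
 * down, and hosts may defer that through host->disable_delay so a burst of
 * requests does not bounce the controller in and out of its power-saving
 * state.
 */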
/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	WARN_ON(!host->claimed);

	mmc_host_lazy_disable(host);

	mmc_do_release_host(host);
}

EXPORT_SYMBOL(mmc_release_host);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}
void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
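
	/*
	 * Worked example (illustrative): vdd = 3300 with @low_bits = false
	 * gives bit = (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34);
	 * with @low_bits = true the boundary value prefers the lower bit, 20,
	 * i.e. ilog2(MMC_VDD_32_33), matching the kerneldoc above.
	 */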
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
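
			/*
			 * Worked example (illustrative): vdd_bit = 21 (the
			 * 3.3-3.4 V OCR bit) gives tmp = 21 - 7 = 14, so
			 * min_uV = 3,300,000 and max_uV = 3,400,000.
			 */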
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);
		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	mmc_poweroff_notify(host);

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time. For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value. For modern cards it
	 * will end up being 4MiB. Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg, unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but it does
	 * not seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;
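
	/*
	 * Example (illustrative): with a 512 KiB erase group (1024 sectors,
	 * erase_shift = 10), erasing sectors 1000..1030 touches two erase
	 * groups, so qty = (1030 >> 10) - (1000 >> 10) + 1 = 2.
	 */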

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}
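
/*
 * Note (interpretation, not from the original source): the conversion at the
 * end of mmc_do_calc_max_discard() decrements qty before scaling to sectors
 * because a discard range that is not aligned to an erase group can straddle
 * one extra group boundary, so only qty - 1 whole groups are guaranteed to
 * fit within the host's maximum discard timeout.
 */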
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
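
	/*
	 * Note (illustrative): these are the standard card-init frequencies,
	 * tried in descending order (clamped to host->f_min) by the probe
	 * loop further down until one of the attach handlers succeeds.
	 */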
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable)
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, 0);
		if (err)
			pr_err("%s: cache %s error %d\n",
					mmc_hostname(card->host),
					enable ? "on" : "off",
					err);
		else
			card->ext_csd.cache_ctrl = enable;
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (host->caps & MMC_CAP_DISABLE)
		cancel_delayed_work(&host->disable);
	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();
	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/*
		 * A long response time is not acceptable for device drivers
		 * when doing suspend. Prevent mmc_claim_host() in the suspend
		 * sequence from potentially waiting "forever" by trying to
		 * pre-claim the host.
		 */
		if (mmc_try_claim_host(host)) {
			if (host->bus_ops->suspend) {
				/*
				 * For eMMC 4.5 device send notify command
				 * before sleep, because in sleep state eMMC 4.5
				 * devices respond to only RESET and AWAKE cmd
				 */
				mmc_poweroff_notify(host);
				err = host->bus_ops->suspend(host);
			}
			mmc_do_release_host(host);

			if (err == -ENOSYS || !host->bus_ops->resume) {
				/*
				 * We simply "remove" the card in this case.
				 * It will be redetected on resume.
				 */
				if (host->bus_ops->remove)
					host->bus_ops->remove(host);
				mmc_claim_host(host);
				mmc_detach_bus(host);
				mmc_power_off(host);
				mmc_release_host(host);
				host->pm_flags = 0;
				err = 0;
			}
		} else {
			err = -EBUSY;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in a pm notifier while userspace isn't yet frozen, so we will
 * be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		mmc_claim_host(host);

		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

		break;
	}

	return 0;
}
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");