/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}
EXPORT_SYMBOL(mmc_request_done);
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent bkops status (LEVEL_2 and higher) bkops is executed
	 * synchronously; otherwise the operation is still in progress.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	mrq->host->context_info.is_done_rcv = true;
	wake_up_interruptible(&mrq->host->context_info.wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
/*
 *	__mmc_start_data_req() - starts data request
 *	@host: MMC host to start the request
 *	@mrq: data request to start
 *
 *	Sets the done callback to be called when request is completed by the card.
 *	Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks MMC context till host controller will ack end of data request
 * execution or new request notification arrives from the block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			}
			pr_info("%s: req failed (CMD%u): %d, retrying...\n",
				mmc_hostname(host),
				cmd->opcode, cmd->error);
			cmd->retries--;
			cmd->error = 0;
			host->ops->request(host, mrq);
			continue; /* wait for done/new event again */
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
		}
	}
	return err;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warning("%s: %s: Interrupted sanitize\n",
					   mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		       that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request, then start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing more to return here.
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and check the card status
 *	until the card leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is EINVAL, we can't issue an HPI.
	 * It should complete the BKOPS.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences to a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}
EXPORT_SYMBOL(mmc_release_host);
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
1051 * This gates the clock by setting it to 0 Hz.
1053 void mmc_gate_clock(struct mmc_host
*host
)
1055 unsigned long flags
;
1057 spin_lock_irqsave(&host
->clk_lock
, flags
);
1058 host
->clk_old
= host
->ios
.clock
;
1059 host
->ios
.clock
= 0;
1060 host
->clk_gated
= true;
1061 spin_unlock_irqrestore(&host
->clk_lock
, flags
);
1066 * This restores the clock from gating by using the cached
1069 void mmc_ungate_clock(struct mmc_host
*host
)
1072 * We should previously have gated the clock, so the clock shall
1073 * be 0 here! The clock may however be 0 during initialization,
1074 * when some request operations are performed before setting
1075 * the frequency. When ungate is requested in that situation
1076 * we just ignore the call.
1078 if (host
->clk_old
) {
1079 BUG_ON(host
->ios
.clock
);
1080 /* This call will also set host->clk_gated to false */
1081 __mmc_set_clock(host
, host
->clk_old
);
1085 void mmc_set_ungated(struct mmc_host
*host
)
1087 unsigned long flags
;
1090 * We've been given a new frequency while the clock is gated,
1091 * so make sure we regard this as ungating it.
1093 spin_lock_irqsave(&host
->clk_lock
, flags
);
1094 host
->clk_gated
= false;
1095 spin_unlock_irqrestore(&host
->clk_lock
, flags
);
1099 void mmc_set_ungated(struct mmc_host
*host
)
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * 1. Return zero on success.
 * 2. Return negative errno: voltage-range is invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges || !num_ranges) {
		pr_info("%s: voltage-ranges unspecified\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);

#endif /* CONFIG_OF */
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int		vdd_uV;
		int		vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * driver's set_ios() callback.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		int		tmp;
		int		voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * If we're using a fixed/static regulator, don't call
		 * regulator_set_voltage; it would fail.
		 */
		voltage = regulator_get_voltage(supply);

		if (!regulator_can_change_voltage(supply))
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */
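/*
 * Example (illustrative sketch, not part of this file): a host driver
 * typically pairs mmc_regulator_get_supply() in its probe function with
 * mmc_regulator_set_ocr() in its ->set_ios() callback:
 *
 *	mmc_regulator_get_supply(mmc);		(in probe)
 *
 *	if (!IS_ERR(mmc->supply.vmmc))		(in ->set_ios())
 *		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 *
 * Passing vdd_bit == 0 powers the supply back off.
 */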
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	if (ocr) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warning("%s: cannot verify signal voltage switch\n",
				mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	mmc_host_clk_hold(host);
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 5 ms */
	mmc_delay(5);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host);
	}

	mmc_host_clk_release(host);

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
				          unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg, unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	unsigned long timeout;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
			bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
2367 int _mmc_detect_card_removed(struct mmc_host
*host
)
2371 if ((host
->caps
& MMC_CAP_NONREMOVABLE
) || !host
->bus_ops
->alive
)
2374 if (!host
->card
|| mmc_card_removed(host
->card
))
2377 ret
= host
->bus_ops
->alive(host
);
2380 * Card detect status and alive check may be out of sync if card is
2381 * removed slowly, when card detect switch changes while card/slot
2382 * pads are still contacted in hardware (refer to "SD Card Mechanical
2383 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2384 * detect work 200ms later for this case.
2386 if (!ret
&& host
->ops
->get_cd
&& !host
->ops
->get_cd(host
)) {
2387 mmc_detect_change(host
, msecs_to_jiffies(200));
2388 pr_debug("%s: card removed too slowly\n", mmc_hostname(host
));
2392 mmc_card_set_removed(host
->card
);
2393 pr_debug("%s: card remove detected\n", mmc_hostname(host
));
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
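
/*
 * Illustrative usage sketch (not part of the original source): the caller
 * must hold the host claim, as the WARN_ON above enforces.  A hypothetical
 * error path in a card driver could look like:
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_detect_card_removed(card->host)) {
 *		// hypothetical handling: abort queued requests for this card
 *	}
 *	mmc_release_host(card->host);
 */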
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host);
	mmc_detect_change(host, 0);
}
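
/*
 * Illustrative sketch (hypothetical, not part of the original source): a
 * host controller driver that wants to skip the initial power-up scan sets
 * MMC_CAP2_NO_PRESCAN_POWERUP on the host it allocates before registering
 * it, which eventually reaches mmc_start_host() above.  struct my_host and
 * the error handling are omitted/hypothetical:
 *
 *	struct mmc_host *mmc = mmc_alloc_host(sizeof(struct my_host), dev);
 *
 *	mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
 *	mmc_add_host(mmc);
 */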
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
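
/*
 * Illustrative usage sketch (hypothetical, not part of the original source):
 * a platform/slot driver that can cut power to the card while the system
 * stays up might pair these helpers in its own suspend/resume callbacks.
 * The callback names and drvdata layout below are assumptions:
 *
 *	static int my_slot_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_power_save_host(mmc);
 *	}
 *
 *	static int my_slot_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_power_restore_host(mmc);
 *	}
 */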
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
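
/*
 * Illustrative usage sketch (not part of the original source): callers such
 * as a card driver's suspend or shutdown path flush the volatile cache with
 * the host claimed so no other request can interleave.  err and card come
 * from the (hypothetical) caller:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_flush_cache(card);
 *	mmc_release_host(card->host);
 */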
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with host claimed
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
						mmc_hostname(card->host),
						enable ? "on" : "off",
						err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
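
/*
 * Illustrative usage sketch (not part of the original source): the eMMC
 * init path can enable the cache once the card is up, and a suspend path
 * can turn it off, which per the comment above implies a flush.  Error
 * handling is left to the (hypothetical) caller:
 *
 *	mmc_claim_host(host);
 *	err = mmc_cache_ctrl(host, 1);
 *	mmc_release_host(host);
 */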
/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	/* This function is deprecated */
	return 0;
}
EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	/* This function is deprecated */
	return 0;
}
EXPORT_SYMBOL(mmc_resume_host);
/* Do the card removal on suspend if card is assumed removable
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
 * to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err && host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;
	}

	return 0;
}
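
/*
 * Illustrative sketch (hypothetical, not part of the original source): the
 * host registration code wires this callback into the PM core roughly like:
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	register_pm_notifier(&host->pm_notify);
 *
 * so rescans are disabled before the system suspends and re-enabled once it
 * resumes.
 */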
/**
 *	mmc_init_context_info() - init synchronization context
 *	@host: mmc host
 *
 *	Init struct context_info needed to implement asynchronous
 *	request mechanism, used by mmc core, host driver and mmc requests
 *	supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}
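
/*
 * Note (descriptive, added for clarity): these flags implement the
 * asynchronous request handshake.  Completion of the previous transfer sets
 * is_done_rcv, the request supplier sets is_new_req when fresh work arrives,
 * and the core sleeps on the wait queue until one of the two happens, so it
 * can either post-process the finished transfer or fetch the new request.
 */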
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");