/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <asm/scatterlist.h>
23 #include <linux/scatterlist.h>
25 #include <linux/mmc/card.h>
26 #include <linux/mmc/host.h>
27 #include <linux/mmc/mmc.h>
28 #include <linux/mmc/sd.h>
39 extern int mmc_attach_mmc(struct mmc_host
*host
, u32 ocr
);
40 extern int mmc_attach_sd(struct mmc_host
*host
, u32 ocr
);
41 extern int mmc_attach_sdio(struct mmc_host
*host
, u32 ocr
);
43 static struct workqueue_struct
*workqueue
;
46 * Enabling software CRCs on the data blocks can be a significant (30%)
47 * performance cost, and for other reasons may not always be desired.
48 * So we allow it it to be disabled.
51 module_param(use_spi_crc
, bool, 0);
54 * Internal function. Schedule delayed work in the MMC work queue.
56 static int mmc_schedule_delayed_work(struct delayed_work
*work
,
59 return queue_delayed_work(workqueue
, work
, delay
);
63 * Internal function. Flush all scheduled work from the MMC work queue.
65 static void mmc_flush_scheduled_work(void)
67 flush_workqueue(workqueue
);
71 * mmc_request_done - finish processing an MMC request
72 * @host: MMC host which completed request
73 * @mrq: MMC request which request
75 * MMC drivers should call this function when they have completed
76 * their processing of a request.
78 void mmc_request_done(struct mmc_host
*host
, struct mmc_request
*mrq
)
80 struct mmc_command
*cmd
= mrq
->cmd
;
83 if (err
&& cmd
->retries
&& mmc_host_is_spi(host
)) {
84 if (cmd
->resp
[0] & R1_SPI_ILLEGAL_COMMAND
)
88 if (err
&& cmd
->retries
) {
89 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
90 mmc_hostname(host
), cmd
->opcode
, err
);
94 host
->ops
->request(host
, mrq
);
96 led_trigger_event(host
->led
, LED_OFF
);
98 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
99 mmc_hostname(host
), cmd
->opcode
, err
,
100 cmd
->resp
[0], cmd
->resp
[1],
101 cmd
->resp
[2], cmd
->resp
[3]);
104 pr_debug("%s: %d bytes transferred: %d\n",
106 mrq
->data
->bytes_xfered
, mrq
->data
->error
);
110 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
111 mmc_hostname(host
), mrq
->stop
->opcode
,
113 mrq
->stop
->resp
[0], mrq
->stop
->resp
[1],
114 mrq
->stop
->resp
[2], mrq
->stop
->resp
[3]);
122 EXPORT_SYMBOL(mmc_request_done
);
125 mmc_start_request(struct mmc_host
*host
, struct mmc_request
*mrq
)
127 #ifdef CONFIG_MMC_DEBUG
131 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
132 mmc_hostname(host
), mrq
->cmd
->opcode
,
133 mrq
->cmd
->arg
, mrq
->cmd
->flags
);
136 pr_debug("%s: blksz %d blocks %d flags %08x "
137 "tsac %d ms nsac %d\n",
138 mmc_hostname(host
), mrq
->data
->blksz
,
139 mrq
->data
->blocks
, mrq
->data
->flags
,
140 mrq
->data
->timeout_ns
/ 1000000,
141 mrq
->data
->timeout_clks
);
145 pr_debug("%s: CMD%u arg %08x flags %08x\n",
146 mmc_hostname(host
), mrq
->stop
->opcode
,
147 mrq
->stop
->arg
, mrq
->stop
->flags
);
150 WARN_ON(!host
->claimed
);
152 led_trigger_event(host
->led
, LED_FULL
);
157 BUG_ON(mrq
->data
->blksz
> host
->max_blk_size
);
158 BUG_ON(mrq
->data
->blocks
> host
->max_blk_count
);
159 BUG_ON(mrq
->data
->blocks
* mrq
->data
->blksz
>
162 #ifdef CONFIG_MMC_DEBUG
164 for (i
= 0;i
< mrq
->data
->sg_len
;i
++)
165 sz
+= mrq
->data
->sg
[i
].length
;
166 BUG_ON(sz
!= mrq
->data
->blocks
* mrq
->data
->blksz
);
169 mrq
->cmd
->data
= mrq
->data
;
170 mrq
->data
->error
= 0;
171 mrq
->data
->mrq
= mrq
;
173 mrq
->data
->stop
= mrq
->stop
;
174 mrq
->stop
->error
= 0;
175 mrq
->stop
->mrq
= mrq
;
178 host
->ops
->request(host
, mrq
);
181 static void mmc_wait_done(struct mmc_request
*mrq
)
183 complete(mrq
->done_data
);
187 * mmc_wait_for_req - start a request and wait for completion
188 * @host: MMC host to start command
189 * @mrq: MMC request to start
191 * Start a new MMC custom command request for a host, and wait
192 * for the command to complete. Does not attempt to parse the
195 void mmc_wait_for_req(struct mmc_host
*host
, struct mmc_request
*mrq
)
197 DECLARE_COMPLETION_ONSTACK(complete
);
199 mrq
->done_data
= &complete
;
200 mrq
->done
= mmc_wait_done
;
202 mmc_start_request(host
, mrq
);
204 wait_for_completion(&complete
);
207 EXPORT_SYMBOL(mmc_wait_for_req
);
210 * mmc_wait_for_cmd - start a command and wait for completion
211 * @host: MMC host to start command
212 * @cmd: MMC command to start
213 * @retries: maximum number of retries
215 * Start a new MMC command for a host, and wait for the command
216 * to complete. Return any error that occurred while the command
217 * was executing. Do not attempt to parse the response.
219 int mmc_wait_for_cmd(struct mmc_host
*host
, struct mmc_command
*cmd
, int retries
)
221 struct mmc_request mrq
;
223 WARN_ON(!host
->claimed
);
225 memset(&mrq
, 0, sizeof(struct mmc_request
));
227 memset(cmd
->resp
, 0, sizeof(cmd
->resp
));
228 cmd
->retries
= retries
;
233 mmc_wait_for_req(host
, &mrq
);
238 EXPORT_SYMBOL(mmc_wait_for_cmd
);
241 * mmc_set_data_timeout - set the timeout for a data command
242 * @data: data phase for command
243 * @card: the MMC card associated with the data transfer
245 * Computes the data timeout parameters according to the
246 * correct algorithm given the card type.
248 void mmc_set_data_timeout(struct mmc_data
*data
, const struct mmc_card
*card
)
253 * SDIO cards only define an upper 1 s limit on access.
255 if (mmc_card_sdio(card
)) {
256 data
->timeout_ns
= 1000000000;
257 data
->timeout_clks
= 0;
262 * SD cards use a 100 multiplier rather than 10
264 mult
= mmc_card_sd(card
) ? 100 : 10;
267 * Scale up the multiplier (and therefore the timeout) by
268 * the r2w factor for writes.
270 if (data
->flags
& MMC_DATA_WRITE
)
271 mult
<<= card
->csd
.r2w_factor
;
273 data
->timeout_ns
= card
->csd
.tacc_ns
* mult
;
274 data
->timeout_clks
= card
->csd
.tacc_clks
* mult
;
277 * SD cards also have an upper limit on the timeout.
279 if (mmc_card_sd(card
)) {
280 unsigned int timeout_us
, limit_us
;
282 timeout_us
= data
->timeout_ns
/ 1000;
283 timeout_us
+= data
->timeout_clks
* 1000 /
284 (card
->host
->ios
.clock
/ 1000);
286 if (data
->flags
& MMC_DATA_WRITE
)
292 * SDHC cards always use these fixed values.
294 if (timeout_us
> limit_us
|| mmc_card_blockaddr(card
)) {
295 data
->timeout_ns
= limit_us
* 1000;
296 data
->timeout_clks
= 0;
300 EXPORT_SYMBOL(mmc_set_data_timeout
);
303 * __mmc_claim_host - exclusively claim a host
304 * @host: mmc host to claim
305 * @abort: whether or not the operation should be aborted
307 * Claim a host for a set of operations. If @abort is non null and
308 * dereference a non-zero value then this will return prematurely with
309 * that non-zero value without acquiring the lock. Returns zero
310 * with the lock held otherwise.
312 int __mmc_claim_host(struct mmc_host
*host
, atomic_t
*abort
)
314 DECLARE_WAITQUEUE(wait
, current
);
320 add_wait_queue(&host
->wq
, &wait
);
321 spin_lock_irqsave(&host
->lock
, flags
);
323 set_current_state(TASK_UNINTERRUPTIBLE
);
324 stop
= abort
? atomic_read(abort
) : 0;
325 if (stop
|| !host
->claimed
)
327 spin_unlock_irqrestore(&host
->lock
, flags
);
329 spin_lock_irqsave(&host
->lock
, flags
);
331 set_current_state(TASK_RUNNING
);
336 spin_unlock_irqrestore(&host
->lock
, flags
);
337 remove_wait_queue(&host
->wq
, &wait
);
341 EXPORT_SYMBOL(__mmc_claim_host
);
344 * mmc_release_host - release a host
345 * @host: mmc host to release
347 * Release a MMC host, allowing others to claim the host
348 * for their operations.
350 void mmc_release_host(struct mmc_host
*host
)
354 WARN_ON(!host
->claimed
);
356 spin_lock_irqsave(&host
->lock
, flags
);
358 spin_unlock_irqrestore(&host
->lock
, flags
);
363 EXPORT_SYMBOL(mmc_release_host
);
366 * Internal function that does the actual ios call to the host driver,
367 * optionally printing some debug output.
369 static inline void mmc_set_ios(struct mmc_host
*host
)
371 struct mmc_ios
*ios
= &host
->ios
;
373 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
374 "width %u timing %u\n",
375 mmc_hostname(host
), ios
->clock
, ios
->bus_mode
,
376 ios
->power_mode
, ios
->chip_select
, ios
->vdd
,
377 ios
->bus_width
, ios
->timing
);
379 host
->ops
->set_ios(host
, ios
);
383 * Control chip select pin on a host.
385 void mmc_set_chip_select(struct mmc_host
*host
, int mode
)
387 host
->ios
.chip_select
= mode
;
392 * Sets the host clock to the highest possible frequency that
395 void mmc_set_clock(struct mmc_host
*host
, unsigned int hz
)
397 WARN_ON(hz
< host
->f_min
);
399 if (hz
> host
->f_max
)
402 host
->ios
.clock
= hz
;
407 * Change the bus mode (open drain/push-pull) of a host.
409 void mmc_set_bus_mode(struct mmc_host
*host
, unsigned int mode
)
411 host
->ios
.bus_mode
= mode
;
416 * Change data bus width of a host.
418 void mmc_set_bus_width(struct mmc_host
*host
, unsigned int width
)
420 host
->ios
.bus_width
= width
;
425 * Mask off any voltages we don't support and select
428 u32
mmc_select_voltage(struct mmc_host
*host
, u32 ocr
)
432 ocr
&= host
->ocr_avail
;
450 * Select timing parameters for host.
452 void mmc_set_timing(struct mmc_host
*host
, unsigned int timing
)
454 host
->ios
.timing
= timing
;
459 * Apply power to the MMC stack. This is a two-stage process.
460 * First, we enable power to the card without the clock running.
461 * We then wait a bit for the power to stabilise. Finally,
462 * enable the bus drivers and clock to the card.
464 * We must _NOT_ enable the clock prior to power stablising.
466 * If a host does all the power sequencing itself, ignore the
467 * initial MMC_POWER_UP stage.
469 static void mmc_power_up(struct mmc_host
*host
)
471 int bit
= fls(host
->ocr_avail
) - 1;
474 if (mmc_host_is_spi(host
)) {
475 host
->ios
.chip_select
= MMC_CS_HIGH
;
476 host
->ios
.bus_mode
= MMC_BUSMODE_PUSHPULL
;
478 host
->ios
.chip_select
= MMC_CS_DONTCARE
;
479 host
->ios
.bus_mode
= MMC_BUSMODE_OPENDRAIN
;
481 host
->ios
.power_mode
= MMC_POWER_UP
;
482 host
->ios
.bus_width
= MMC_BUS_WIDTH_1
;
483 host
->ios
.timing
= MMC_TIMING_LEGACY
;
487 * This delay should be sufficient to allow the power supply
488 * to reach the minimum voltage.
492 host
->ios
.clock
= host
->f_min
;
493 host
->ios
.power_mode
= MMC_POWER_ON
;
497 * This delay must be at least 74 clock sizes, or 1 ms, or the
498 * time required to reach a stable voltage.
503 static void mmc_power_off(struct mmc_host
*host
)
507 if (!mmc_host_is_spi(host
)) {
508 host
->ios
.bus_mode
= MMC_BUSMODE_OPENDRAIN
;
509 host
->ios
.chip_select
= MMC_CS_DONTCARE
;
511 host
->ios
.power_mode
= MMC_POWER_OFF
;
512 host
->ios
.bus_width
= MMC_BUS_WIDTH_1
;
513 host
->ios
.timing
= MMC_TIMING_LEGACY
;
518 * Cleanup when the last reference to the bus operator is dropped.
520 void __mmc_release_bus(struct mmc_host
*host
)
523 BUG_ON(host
->bus_refs
);
524 BUG_ON(!host
->bus_dead
);
526 host
->bus_ops
= NULL
;
530 * Increase reference count of bus operator
532 static inline void mmc_bus_get(struct mmc_host
*host
)
536 spin_lock_irqsave(&host
->lock
, flags
);
538 spin_unlock_irqrestore(&host
->lock
, flags
);
542 * Decrease reference count of bus operator and free it if
543 * it is the last reference.
545 static inline void mmc_bus_put(struct mmc_host
*host
)
549 spin_lock_irqsave(&host
->lock
, flags
);
551 if ((host
->bus_refs
== 0) && host
->bus_ops
)
552 __mmc_release_bus(host
);
553 spin_unlock_irqrestore(&host
->lock
, flags
);
557 * Assign a mmc bus handler to a host. Only one bus handler may control a
558 * host at any given time.
560 void mmc_attach_bus(struct mmc_host
*host
, const struct mmc_bus_ops
*ops
)
567 WARN_ON(!host
->claimed
);
569 spin_lock_irqsave(&host
->lock
, flags
);
571 BUG_ON(host
->bus_ops
);
572 BUG_ON(host
->bus_refs
);
578 spin_unlock_irqrestore(&host
->lock
, flags
);
582 * Remove the current bus handler from a host. Assumes that there are
583 * no interesting cards left, so the bus is powered down.
585 void mmc_detach_bus(struct mmc_host
*host
)
591 WARN_ON(!host
->claimed
);
592 WARN_ON(!host
->bus_ops
);
594 spin_lock_irqsave(&host
->lock
, flags
);
598 spin_unlock_irqrestore(&host
->lock
, flags
);
606 * mmc_detect_change - process change of state on a MMC socket
607 * @host: host which changed state.
608 * @delay: optional delay to wait before detection (jiffies)
610 * MMC drivers should call this when they detect a card has been
611 * inserted or removed. The MMC layer will confirm that any
612 * present card is still functional, and initialize any newly
615 void mmc_detect_change(struct mmc_host
*host
, unsigned long delay
)
617 #ifdef CONFIG_MMC_DEBUG
619 spin_lock_irqsave(&host
->lock
, flags
);
620 WARN_ON(host
->removed
);
621 spin_unlock_irqrestore(&host
->lock
, flags
);
624 mmc_schedule_delayed_work(&host
->detect
, delay
);
627 EXPORT_SYMBOL(mmc_detect_change
);
630 void mmc_rescan(struct work_struct
*work
)
632 struct mmc_host
*host
=
633 container_of(work
, struct mmc_host
, detect
.work
);
639 if (host
->bus_ops
== NULL
) {
641 * Only we can add a new handler, so it's safe to
642 * release the lock here.
646 mmc_claim_host(host
);
651 mmc_send_if_cond(host
, host
->ocr_avail
);
654 * First we search for SDIO...
656 err
= mmc_send_io_op_cond(host
, 0, &ocr
);
658 if (mmc_attach_sdio(host
, ocr
))
664 * ...then normal SD...
666 err
= mmc_send_app_op_cond(host
, 0, &ocr
);
668 if (mmc_attach_sd(host
, ocr
))
674 * ...and finally MMC.
676 err
= mmc_send_op_cond(host
, 0, &ocr
);
678 if (mmc_attach_mmc(host
, ocr
))
683 mmc_release_host(host
);
686 if (host
->bus_ops
->detect
&& !host
->bus_dead
)
687 host
->bus_ops
->detect(host
);
693 void mmc_start_host(struct mmc_host
*host
)
696 mmc_detect_change(host
, 0);
699 void mmc_stop_host(struct mmc_host
*host
)
701 #ifdef CONFIG_MMC_DEBUG
703 spin_lock_irqsave(&host
->lock
, flags
);
705 spin_unlock_irqrestore(&host
->lock
, flags
);
708 mmc_flush_scheduled_work();
711 if (host
->bus_ops
&& !host
->bus_dead
) {
712 if (host
->bus_ops
->remove
)
713 host
->bus_ops
->remove(host
);
715 mmc_claim_host(host
);
716 mmc_detach_bus(host
);
717 mmc_release_host(host
);
729 * mmc_suspend_host - suspend a host
731 * @state: suspend mode (PM_SUSPEND_xxx)
733 int mmc_suspend_host(struct mmc_host
*host
, pm_message_t state
)
735 mmc_flush_scheduled_work();
738 if (host
->bus_ops
&& !host
->bus_dead
) {
739 if (host
->bus_ops
->suspend
)
740 host
->bus_ops
->suspend(host
);
741 if (!host
->bus_ops
->resume
) {
742 if (host
->bus_ops
->remove
)
743 host
->bus_ops
->remove(host
);
745 mmc_claim_host(host
);
746 mmc_detach_bus(host
);
747 mmc_release_host(host
);
757 EXPORT_SYMBOL(mmc_suspend_host
);
760 * mmc_resume_host - resume a previously suspended host
763 int mmc_resume_host(struct mmc_host
*host
)
766 if (host
->bus_ops
&& !host
->bus_dead
) {
768 BUG_ON(!host
->bus_ops
->resume
);
769 host
->bus_ops
->resume(host
);
774 * We add a slight delay here so that resume can progress
777 mmc_detect_change(host
, 1);
782 EXPORT_SYMBOL(mmc_resume_host
);
786 static int __init
mmc_init(void)
790 workqueue
= create_singlethread_workqueue("kmmcd");
794 ret
= mmc_register_bus();
796 goto destroy_workqueue
;
798 ret
= mmc_register_host_class();
802 ret
= sdio_register_bus();
804 goto unregister_host_class
;
808 unregister_host_class
:
809 mmc_unregister_host_class();
811 mmc_unregister_bus();
813 destroy_workqueue(workqueue
);
818 static void __exit
mmc_exit(void)
820 sdio_unregister_bus();
821 mmc_unregister_host_class();
822 mmc_unregister_bus();
823 destroy_workqueue(workqueue
);
826 subsys_initcall(mmc_init
);
827 module_exit(mmc_exit
);
829 MODULE_LICENSE("GPL");