/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
					struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_do_get_cd(host))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
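/*
 * Illustrative walk-through of the chunking above (hypothetical sizes,
 * not from any particular controller): for blksz = 6 the loop issues
 * two 32-bit reads of SDHCI_BUFFER. The first read fills 'scratch' and
 * bytes are peeled off LSB-first (*buf = scratch & 0xFF; scratch >>= 8),
 * so after four bytes 'chunk' reaches 0 and a second read supplies the
 * remaining two bytes. The data port is always read as a full 32-bit
 * word even when fewer bytes are consumed from it.
 */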
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	void *desc;
	void *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, host->align_buffer_sz, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & host->align_mask);

	host->sg_count = sdhci_pre_dma_transfer(host, data);
	if (host->sg_count < 0)
		goto unmap_align;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (host->align_sz - (addr & host->align_mask)) &
			 host->align_mask;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += host->align_sz;
			align_addr += host->align_sz;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
		desc += host->desc_sz;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		 * Mark the last descriptor as the terminating descriptor
		 */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/*
		 * Add a terminating entry.
		 */

		/* nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, host->align_buffer_sz, direction);
	}

	return 0;

unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);
fail:
	return -EINVAL;
}
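/*
 * Worked example of the alignment fix-up above (hypothetical numbers):
 * with align_sz = 4 and align_mask = 3, a scatterlist entry at DMA
 * address 0x1002 yields offset = (4 - (0x1002 & 3)) & 3 = 2, so the
 * first two bytes travel through the bounce buffer in a descriptor of
 * their own and the rest of the segment starts at the 32-bit aligned
 * address 0x1004.
 */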
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & host->align_mask) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & host->align_mask) {
				size = host->align_sz -
				       (sg_dma_address(sg) & host->align_mask);

				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += host->align_sz;
			}
		}
	}

	if (data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);
		data->host_cookie = COOKIE_UNMAPPED;
	}
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
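/*
 * Worked example for the loop above (hypothetical clock): with
 * timeout_clk = 50000 (kHz) the base timeout is
 * (1 << 13) * 1000 / 50000 = 163 us. Each iteration doubles it, so a
 * 100 ms target needs ten doublings (163 us << 10 is roughly 167 ms),
 * giving count = 10 for the TIMEOUT_CONTROL register.
 */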
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
				if (host->flags & SDHCI_USE_64_BIT_DMA)
					sdhci_writel(host,
						     (u64)host->adma_addr >> 32,
						     SDHCI_ADMA_ADDRESS_HI);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data);
			if (sg_cnt <= 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
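/*
 * Example mode word (illustrative): an open-ended multi-block read on a
 * host with SDHCI_AUTO_CMD12 set and DMA in use ends up written as
 *
 *	mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 *	       SDHCI_TRNS_AUTO_CMD12 | SDHCI_TRNS_READ | SDHCI_TRNS_DMA;
 */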
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			if (data->host_cookie == COOKIE_MAPPED) {
				dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
				data->host_cookie = COOKIE_UNMAPPED;
			}
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
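/*
 * Example command word (illustrative): CMD17 (READ_SINGLE_BLOCK) has a
 * short, CRC-protected response with an opcode check plus a data phase,
 * so the flag selection above produces
 * SDHCI_MAKE_CMD(17, SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
 *		      SDHCI_CMD_INDEX | SDHCI_CMD_DATA).
 */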
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;
	bool switch_base_clk = false;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		mdelay(1);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
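/*
 * Divider example for a v3.00 host without a programmable clock
 * multiplier (hypothetical rates): max_clk = 200 MHz and a 25 MHz
 * request scans div = 2, 4, 6, 8 until 200 MHz / 8 <= 25 MHz, so
 * real_div = 8, the register field holds div >> 1 = 4, and
 * actual_clock ends up as exactly 25 MHz.
 */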
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);

		if (mode != MMC_POWER_OFF)
			sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
		else
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		return;
	}

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	/* Firstly check card presence */
	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}
static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot GPIO detect; if defined it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (data->host_cookie == COOKIE_GIVEN ||
				data->host_cookie == COOKIE_MAPPED)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
					 data->flags & MMC_DATA_WRITE ?
					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
		data->host_cookie = COOKIE_UNMAPPED;
	}
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data)
{
	int sg_count;

	if (data->host_cookie == COOKIE_MAPPED) {
		data->host_cookie = COOKIE_GIVEN;
		return data->sg_count;
	}

	WARN_ON(data->host_cookie == COOKIE_GIVEN);

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = COOKIE_MAPPED;

	return sg_count;
}
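/*
 * Cookie life cycle, as used above and in sdhci_pre_req()/
 * sdhci_post_req(): COOKIE_UNMAPPED -> dma_map_sg() -> COOKIE_MAPPED,
 * then COOKIE_GIVEN once the pre-mapped sg count is handed back to a
 * caller re-using the mapping; sdhci_post_req(), sdhci_finish_data()
 * and sdhci_adma_table_post() unmap the buffer and return it to
 * COOKIE_UNMAPPED.
 */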
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data);
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
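/*
 * This ops table is what the MMC core sees for every SDHCI host: a
 * platform driver typically fills in only struct sdhci_ops (reset,
 * set_clock, set_bus_width, ...) and lets sdhci_add_host(), later in
 * this file, install sdhci_ops as the mmc_host_ops.
 */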
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
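/*
 * Dump the ADMA descriptor table when the controller reports an ADMA
 * error, so the offending descriptor can be identified in the log.
 */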
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
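/*
 * Data-path interrupt handler: deals with tuning completion, busy-end
 * signalling, PIO transfers, DMA boundary restarts and data errors.
 */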
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
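		/*
		 * For example, with the default 512 KiB boundary a transfer
		 * that has reached dmanow == 0x10001234 is restarted at
		 * (0x10001234 & ~0x7ffff) + 0x80000 == 0x10080000, i.e. the
		 * next boundary after the stall.
		 */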
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
				" next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. The INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent an interrupt storm which will
			 * freeze the system. And the REMOVE bit gets the
			 * same treatment.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
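/*
 * Threaded half of the interrupt handler: card-detect events and SDIO
 * card interrupts are deferred here by sdhci_irq() (via IRQ_WAKE_THREAD)
 * so the hard-IRQ path stays short.
 */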
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
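/*
 * Program the Wakeup Control register so that card insertion, card
 * removal and SDIO interrupts may wake the system from suspend.
 */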
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
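/*
 * Counterpart of sdhci_suspend_host(): re-enables DMA, reinitialises the
 * controller (keeping card power if MMC_PM_KEEP_POWER is set) and
 * re-requests the IRQ if it was freed on suspend.
 */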
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->runtime_suspended || host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (host->runtime_suspended || !host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
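/*
 * Runtime-resume path: the controller may have lost state while
 * suspended, so it is reinitialised from scratch and clock, power and
 * signal voltage are forced to be reprogrammed.
 */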
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
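/*
 * Glue drivers typically call sdhci_alloc_host() to obtain a struct
 * sdhci_host, fill in ops, quirks and I/O resources, and then call
 * sdhci_add_host() below to probe the capabilities and register the
 * host with the MMC core.
 */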
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}
	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_64_ALIGN;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_64_ALIGN;
			host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->align_buffer_sz = SDHCI_MAX_SEGS *
						SDHCI_ADMA2_32_ALIGN;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
			host->align_sz = SDHCI_ADMA2_32_ALIGN;
			host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
		}
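		/*
		 * With SDHCI_MAX_SEGS == 128 that works out to
		 * 2 * 128 + 1 = 257 descriptors per table, i.e. a few KiB
		 * in either descriptor format.
		 */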
		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
						      host->adma_table_sz,
						      &host->adma_addr,
						      GFP_KERNEL);
		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
		if (!host->adma_table || !host->align_buffer) {
			if (host->adma_table)
				dma_free_coherent(mmc_dev(mmc),
						  host->adma_table_sz,
						  host->adma_table,
						  host->adma_addr);
			kfree(host->align_buffer);
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			host->adma_table = NULL;
			host->align_buffer = NULL;
		} else if (host->adma_addr & host->align_mask) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
					  host->adma_table, host->adma_addr);
			kfree(host->align_buffer);
			host->adma_table = NULL;
			host->align_buffer = NULL;
		}
	}
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}
	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
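	/*
	 * E.g. a Clock Multiplier field of 7 yields clk_mul == 8; with
	 * programmable clock mode the minimum frequency then becomes
	 * (max_clk * clk_mul) / 1024, as computed for f_min below.
	 */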
	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;
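		/*
		 * timeout_clk is in kHz at this point, so dividing the
		 * maximum timeout count by it yields a busy timeout in
		 * milliseconds: e.g. a 1 MHz timeout clock with the default
		 * 1 << 27 count allows roughly 134 seconds.
		 */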
		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
	if (override_timeout_clk)
		host->timeout_clk = override_timeout_clk;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			     SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;
	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;
	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
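	/*
	 * E.g. a Re-tuning Timer Count field of 4 yields a re-tuning
	 * interval of 1 << (4 - 1) = 8 seconds.
	 */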
	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {
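			/*
			 * SDHCI_MAX_CURRENT_MULTIPLIER is 4 mA per register
			 * step, so e.g. an 800 mA regulator limit encodes
			 * as 800 / 4 = 200.
			 */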
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;	/* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any "
			"supported voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}
	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}
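	/*
	 * The capability field encodes the block size as a power of two
	 * above 512 bytes: 0 -> 512, 1 -> 1024, 2 -> 2048.
	 */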
	mmc->max_blk_size = 512 << mmc->max_blk_size;
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif
	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;
#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
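/*
 * Unregister a host.  @dead indicates the controller has already gone
 * away (e.g. surprise removal): any in-flight request is failed with
 * -ENOMEDIUM and the hardware is not reset before tear-down.
 */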
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during "
				"transfer!\n", mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->adma_table)
		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
				  host->adma_table, host->adma_addr);
	kfree(host->align_buffer);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");