/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_do_get_cd(struct sdhci_host *host);

#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		 mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		 sdhci_readl(host, SDHCI_DMA_ADDRESS),
		 sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		 sdhci_readw(host, SDHCI_BLOCK_SIZE),
		 sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		 sdhci_readl(host, SDHCI_ARGUMENT),
		 sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		 sdhci_readl(host, SDHCI_PRESENT_STATE),
		 sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		 sdhci_readb(host, SDHCI_POWER_CONTROL),
		 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		 sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		 sdhci_readl(host, SDHCI_INT_ENABLE),
		 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		 sdhci_readw(host, SDHCI_ACMD12_ERR),
		 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
		 sdhci_readl(host, SDHCI_CAPABILITIES),
		 sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
		 sdhci_readw(host, SDHCI_COMMAND),
		 sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		 sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_do_get_cd(host))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      data->flags & MMC_DATA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
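
/*
 * Illustrative sketch (not part of the driver logic): assuming the
 * ADMA2_TRAN_VALID attribute from sdhci.h, writing a 4 KiB buffer at
 * bus address 0x12345000 as one descriptor,
 *
 *	sdhci_adma_write_desc(host, desc, 0x12345000, 4096,
 *			      ADMA2_TRAN_VALID);
 *
 * stores the little-endian fields len = 0x1000 and addr_lo = 0x12345000;
 * with SDHCI_USE_64_BIT_DMA set, addr_hi would carry the upper 32 bits
 * of the address.
 */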
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
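
/*
 * Worked example (illustrative): with SDHCI_ADMA2_ALIGN == 4, a read
 * segment mapped at bus address 0x1002 is misaligned by two bytes, so
 * size = 4 - (0x1002 & SDHCI_ADMA2_MASK) = 2; those two bytes landed in
 * the bounce buffer during the transfer and are copied back to the
 * start of the segment by the loop above.
 */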
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000 * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
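
/*
 * Worked example (illustrative): with host->timeout_clk = 50000 (a
 * 50 MHz timeout clock, kept in kHz), the base step is
 * (1 << 13) * 1000 / 50000 = 163 us. Doubling once per count, a 100 ms
 * target timeout needs count = 10 (163 us * 2^10 is roughly 167 ms),
 * i.e. the controller gives up after 2^(13 + 10) timeout clocks.
 */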
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
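
/*
 * Illustrative note: SDHCI_MAKE_BLKSZ(dma, blksz) packs the SDMA buffer
 * boundary argument into bits 14:12 and the transfer block size into
 * bits 11:0 of the Block Size register, so a 512-byte block with the
 * default 512K boundary (argument 7) is written as 0x7200.
 */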
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
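
/*
 * Illustrative note: SDHCI_MAKE_CMD(opcode, flags) places the command
 * index in bits 13:8 of the Command register and the response format,
 * CRC/index check and data present flags in the low byte. For example,
 * a CMD17 (single block read, R1 response) would be issued as
 * SDHCI_MAKE_CMD(17, SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
 * SDHCI_CMD_INDEX | SDHCI_CMD_DATA).
 */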
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
							SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
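
/*
 * Worked example (illustrative): for a 136-bit R2 response the
 * controller strips the CRC byte, so resp[0] must end up holding bits
 * 127:96 of the original response. The loop above builds it from the
 * 32-bit word at SDHCI_RESPONSE + 12 shifted left by 8 (bits 127:104),
 * OR'ed with the single byte at offset 11 (bits 103:96), and likewise
 * for resp[1..3] at the lower offsets.
 */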
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;
	bool switch_base_clk = false;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		mdelay(1);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
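
/*
 * Worked example (illustrative): on a v3.00 host with max_clk = 200 MHz
 * and no programmable clock preset, a request for 50 MHz scans the even
 * divisors and stops at div = 4 (the real divisor), so actual_clock
 * becomes 50 MHz and div >> 1 = 2 is what gets split across the low
 * eight divider bits and the two high bits of the Clock Control
 * register.
 */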
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	spin_unlock_irq(&host->lock);
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	spin_lock_irq(&host->lock);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			      unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	if (host->ops->set_power)
		host->ops->set_power(host, mode, vdd);
	else if (!IS_ERR(mmc->supply.vmmc))
		sdhci_set_power_reg(host, mode, vdd);
	else
		sdhci_set_power(host, mode, vdd);
}
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	__sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}
static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot gpio detect. If defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
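
/*
 * Illustrative note: with SAMPLE_COUNT == 5 the loop above is a simple
 * majority vote - the write-protect switch is only reported as set once
 * at least three of the five samples, taken 30 ms apart, come back
 * read-only.
 */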
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			err = -EIO;
			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	if (tuning_count) {
		/*
		 * In case tuning fails, host controllers which support
		 * re-tuning can try tuning again at a later time, when the
		 * re-tuning timer expires. So for these controllers, we
		 * return 0. Since there might be other controllers who do not
		 * have this capability, we return error for them.
		 */
		err = 0;
	}

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}
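
/*
 * Illustrative note: each pass of the tuning loop above sends one
 * tuning command (CMD19 for SD, CMD21 for eMMC HS200) and waits up to
 * 50 ms for the Buffer Read Ready interrupt; the controller clears
 * SDHCI_CTRL_EXEC_TUNING once it has locked onto a sampling point, and
 * MAX_TUNING_LOOP (40) bounds the number of attempts before falling
 * back to the fixed sampling clock.
 */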
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     data->flags & MMC_DATA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);

	data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     (data->flags & MMC_DATA_READ) ?
				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;
	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}
		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. The INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system. And the REMOVE bit gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_CARD_INT) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
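/*
 * Note: sdhci_irq() above runs in hard-irq context under host->lock; it
 * only acks and decodes status bits, and returns IRQ_WAKE_THREAD when
 * card-detect or SDIO card-interrupt work is pending. That sleepable work
 * (mmc_detect_change(), sdio_run_irqs()) is then done in the threaded
 * handler below.
 */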
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	return host;
}

EXPORT_SYMBOL_GPL(sdhci_alloc_host);
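/*
 * A minimal usage sketch of the allocation API (hypothetical glue driver;
 * everything except the sdhci_* exports is an assumption for illustration):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";		(assumed name)
 *	host->ops = &my_sdhci_ops;		(assumed ops table)
 *	host->ioaddr = base;			(mapped register base)
 *	host->irq = platform_get_irq(pdev, 0);
 *	ret = sdhci_add_host(host);
 *
 * and on teardown, sdhci_remove_host(host, 0) followed by
 * sdhci_free_host(host).
 */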
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}
	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
	 * that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (caps[0] & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
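		/*
		 * Worked example (a sketch; assumes SDHCI_MAX_SEGS is 128
		 * and the 32/64-bit descriptor sizes are 8 and 12 bytes,
		 * per sdhci.h): the 32-bit table then needs
		 * (128 * 2 + 1) * 8 = 2056 bytes, the 64-bit one 3084.
		 */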
		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
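	/*
	 * Worked example (a sketch): with a 200 MHz base clock and a
	 * Clock Multiplier field of 7, clk_mul is 8, so programmable
	 * clock mode gives f_min = (200 MHz * 8) / 1024, about 1.56 MHz,
	 * and max_clk = 200 MHz * 8 = 1.6 GHz as the f_max cap.
	 */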
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
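	/*
	 * Worked example (a sketch): a Re-tuning Timer Count field of 4
	 * yields 1 << (4 - 1) = 8, i.e. re-tuning roughly every 8 seconds.
	 */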
	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
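	/*
	 * Worked example (a sketch): a regulator limit of 800000 uA becomes
	 * 800 mA, and 800 / SDHCI_MAX_CURRENT_MULTIPLIER (4 mA per register
	 * unit) gives 200 units, clamped to SDHCI_MAX_CURRENT_LIMIT.
	 */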
	ocr_avail = 0;

	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);
	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * value is below that limit anyway.
	 */
	mmc->max_req_size = 524288;
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}
#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif
	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
				mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");