/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)

#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
	defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);

#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
#else
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
}
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
}
#endif

static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA)
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
		else
			pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				 readl(host->ioaddr + SDHCI_ADMA_ERROR),
				 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
	}

	pr_debug(DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;

		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

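/*
 * Illustrative note (not part of the original file): callers combine the
 * SDHCI_RESET_* mask bits, e.g. sdhci_reset(host, SDHCI_RESET_CMD |
 * SDHCI_RESET_DATA) resets only the command and data circuits, while
 * SDHCI_RESET_ALL additionally drops SD bus power and all controller
 * configuration, which is why the SDHCI_RESET_ALL path above has extra
 * handling.
 */
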
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!sdhci_do_get_cd(host))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);

static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#ifdef SDHCI_USE_LEDS_CLASS
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

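/*
 * Usage note (illustrative, not from the original file): these helpers are
 * always used as a bracketed pair around a short memcpy(), e.g.
 *
 *	buffer = sdhci_kmap_atomic(sg, &flags);
 *	memcpy(buffer, align, size);
 *	sdhci_kunmap_atomic(buffer, &flags);
 *
 * Interrupts stay disabled for the whole window because the atomic kmap is
 * per-CPU and must not be preempted or re-entered between map and unmap.
 */
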
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

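/*
 * Illustrative sketch of the ADMA2 descriptor these helpers build (field
 * names as used above; the struct itself lives in sdhci.h):
 *
 *	cmd  (le16) - attribute bits: valid/end/int plus the action, e.g.
 *	              ADMA2_TRAN_VALID transfers data and
 *	              ADMA2_NOP_END_VALID terminates the table
 *	len  (le16) - transfer length in bytes
 *	addr        - DMA address, addr_lo (le32) plus addr_hi (le32) on
 *	              hosts with SDHCI_USE_64_BIT_DMA
 */
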
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	void *desc;
	void *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, host->align_buffer_sz, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);

	host->sg_count = sdhci_pre_dma_transfer(host, data);
	if (host->sg_count < 0)
		goto unmap_align;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_adma_write_desc(host, desc, addr, len,
				      ADMA2_TRAN_VALID);
		desc += host->desc_sz;

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		 * Mark the last descriptor as the terminating descriptor
		 */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/*
		 * Add a terminating entry.
		 */

		/* nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, host->align_buffer_sz, direction);
	}

	return 0;

unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);
fail:
	return -EINVAL;
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		host->align_buffer_sz, direction);

	/* Do a quick scan of the SG list for any unaligned mappings */
	has_unaligned = false;
	for_each_sg(data->sg, sg, host->sg_count, i)
		if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
			has_unaligned = true;
			break;
		}

	if (has_unaligned && data->flags & MMC_DATA_READ) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				size = SDHCI_ADMA2_ALIGN -
				       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += SDHCI_ADMA2_ALIGN;
			}
		}
	}

	if (data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
		    mmc_hostname(host->mmc), count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

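/*
 * Worked example (illustrative): host->timeout_clk is kept in kHz, so with
 * timeout_clk = 50000 the minimum timeout is 2^13 * 1000 / 50000 ~= 163 us.
 * A target of 100000 us (100 ms) then yields count = 10, since
 * 163 us << 10 ~= 167 ms is the first power-of-two step past the target.
 */
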
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	if (data || (cmd->flags & MMC_RSP_BUSY))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
				if (host->flags & SDHCI_USE_64_BIT_DMA)
					sdhci_writel(host,
						(u64)host->adma_addr >> 32,
						SDHCI_ADMA_ADDRESS_HI);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data);
			if (sg_cnt <= 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

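/*
 * Illustrative example: SDHCI_MAKE_BLKSZ(boundary, blksz) packs the SDMA
 * buffer-boundary exponent into bits 14:12 and the block size into bits
 * 11:0 of the Block Size register, so a 512-byte block with the default
 * 512K boundary (SDHCI_DEFAULT_BOUNDARY_ARG = 7) is written as
 * (0x7 << 12) | 512 = 0x7200.
 */
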
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

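/*
 * Illustrative example: a multi-block DMA read with Auto-CMD12 enabled and
 * no CMD23 ends up writing SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI |
 * SDHCI_TRNS_AUTO_CMD12 | SDHCI_TRNS_READ | SDHCI_TRNS_DMA to the Transfer
 * Mode register.
 */
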
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			if (data->host_cookie == COOKIE_MAPPED) {
				dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
				data->host_cookie = COOKIE_UNMAPPED;
			}
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	mod_timer(&host->timer, timeout);

	host->cmd = cmd;
	host->busy_handle = 0;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

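/*
 * Illustrative example: CMD17 (READ_SINGLE_BLOCK) carries an R1 response,
 * so flags resolve to SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
 * SDHCI_CMD_INDEX | SDHCI_CMD_DATA, and SDHCI_MAKE_CMD(17, flags) is the
 * value that reaches the Command register.
 */
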
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;
	bool switch_base_clk = false;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		mdelay(1);

	if (clock == 0)
		return;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		spin_unlock_irq(&host->lock);
		usleep_range(900, 1100);
		spin_lock_irq(&host->lock);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

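/*
 * Worked example (illustrative): on a v3.00 host with max_clk = 200 MHz and
 * a requested clock of 25 MHz, the divided-clock search picks div = 8
 * (200 MHz / 8 = 25 MHz), so real_div = 8 and actual_clock = 25 MHz, while
 * div >>= 1 yields the value 4 actually programmed into the divider field
 * (SDCLK = base / (2 * N)).
 */
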
static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
			    unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	u8 pwr = 0;

	if (!IS_ERR(mmc->supply.vmmc)) {
		spin_unlock_irq(&host->lock);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		spin_lock_irq(&host->lock);

		if (mode != MMC_POWER_OFF)
			sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
		else
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		return;
	}

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	u8 ctrl;
	struct mmc_host *mmc = host->mmc;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_MMC_DDR52) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}

static int sdhci_do_get_cd(struct sdhci_host *host)
{
	int gpio_cd = mmc_gpio_get_cd(host->mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (!IS_ERR_VALUE(gpio_cd))
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	sdhci_runtime_pm_get(host);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_runtime_pm_put(host);
}

static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
						    3600000);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc,
					1700000, 1950000);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
						    1300000);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}

static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	if (host->version < SDHCI_SPEC_300)
		return 0;
	sdhci_runtime_pm_get(host);
	err = sdhci_do_start_signal_voltage_switch(host, ios);
	sdhci_runtime_pm_put(host);
	return err;
}

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	sdhci_runtime_pm_get(host);
	/* Check whether DAT[3:0] is 0000 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
	sdhci_runtime_pm_put(host);

	return !(present_state & SDHCI_DATA_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int tuning_loop_counter = MAX_TUNING_LOOP;
	int err = 0;
	unsigned long flags;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	sdhci_runtime_pm_get(host);
	spin_lock_irqsave(&host->lock, flags);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	host->flags &= ~SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out_unlock;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out_unlock;
	}

	if (host->ops->platform_execute_tuning) {
		spin_unlock_irqrestore(&host->lock, flags);
		err = host->ops->platform_execute_tuning(host, opcode);
		sdhci_runtime_pm_put(host);
		return err;
	}

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);

	/*
	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times or a timeout of 150ms occurs.
	 */
	do {
		struct mmc_command cmd = {0};
		struct mmc_request mrq = {NULL};

		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.retries = 0;
		cmd.data = NULL;
		cmd.error = 0;

		if (tuning_loop_counter-- == 0)
			break;

		mrq.cmd = &cmd;
		host->mrq = &mrq;

		/*
		 * In response to CMD19, the card sends 64 bytes of tuning
		 * block to the Host Controller. So we set the block size
		 * to 64 here.
		 */
		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
					     SDHCI_BLOCK_SIZE);
			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
					     SDHCI_BLOCK_SIZE);
		} else {
			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
				     SDHCI_BLOCK_SIZE);
		}

		/*
		 * The tuning block is sent by the card to the host controller.
		 * So we set the TRNS_READ bit in the Transfer Mode register.
		 * This also takes care of setting DMA Enable and Multi Block
		 * Select in the same register to 0.
		 */
		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

		sdhci_send_command(host, &cmd);

		host->cmd = NULL;
		host->mrq = NULL;

		spin_unlock_irqrestore(&host->lock, flags);
		/* Wait for Buffer Read Ready interrupt */
		wait_event_interruptible_timeout(host->buf_ready_int,
					(host->tuning_done == 1),
					msecs_to_jiffies(50));
		spin_lock_irqsave(&host->lock, flags);

		if (!host->tuning_done) {
			pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

			sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);

			err = -EIO;

			if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
				goto out;

			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			spin_unlock_irqrestore(&host->lock, flags);

			memset(&cmd, 0, sizeof(cmd));
			cmd.opcode = MMC_STOP_TRANSMISSION;
			cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			cmd.busy_timeout = 50;
			mmc_wait_for_cmd(mmc, &cmd, 0);

			spin_lock_irqsave(&host->lock, flags);

			goto out;
		}

		host->tuning_done = 0;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		/* eMMC spec does not require a delay between tuning cycles */
		if (opcode == MMC_SEND_TUNING_BLOCK)
			mdelay(1);
	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);

	/*
	 * The Host Driver has exhausted the maximum number of loops allowed,
	 * so use fixed sampling frequency.
	 */
	if (tuning_loop_counter < 0) {
		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
	}
	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
		pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
		err = -EIO;
	}

out:
	/*
	 * In case tuning fails, host controllers which support
	 * re-tuning can try tuning again at a later time, when the
	 * re-tuning timer expires. So for these controllers, we
	 * return 0. Since there might be other controllers who do not
	 * have this capability, we return error for them.
	 */
	if (tuning_count)
		err = 0;

	host->mmc->retune_period = err ? 0 : tuning_count;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_runtime_pm_put(host);

	return err;
}

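/*
 * Summary (illustrative): the tuning loop issues CMD19/CMD21 with only the
 * Buffer Read Ready interrupt enabled; the controller rotates its sampling
 * point after each tuning block until it clears SDHCI_CTRL_EXEC_TUNING
 * (success, with SDHCI_CTRL_TUNED_CLK set), or the driver gives up after
 * MAX_TUNING_LOOP (40) iterations and falls back to fixed sampling.
 */
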
static int sdhci_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
{
	struct sdhci_host *host = mmc_priv(card->host);

	if (!host->ops->select_drive_strength)
		return 0;

	return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
						card_drv, drv_type);
}

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (data->host_cookie == COOKIE_GIVEN ||
				data->host_cookie == COOKIE_MAPPED)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
					 data->flags & MMC_DATA_WRITE ?
					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data)
{
	int sg_count;

	if (data->host_cookie == COOKIE_MAPPED) {
		data->host_cookie = COOKIE_GIVEN;
		return data->sg_count;
	}

	WARN_ON(data->host_cookie == COOKIE_GIVEN);

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				data->flags & MMC_DATA_WRITE ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = COOKIE_MAPPED;

	return sg_count;
}

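/*
 * Cookie lifecycle (illustrative): host_cookie tracks who owns the DMA
 * mapping. COOKIE_UNMAPPED becomes COOKIE_MAPPED when dma_map_sg() runs
 * (either here or via sdhci_pre_req()), COOKIE_MAPPED becomes COOKIE_GIVEN
 * when the existing mapping is handed to a transfer, and everything returns
 * to COOKIE_UNMAPPED once sdhci_post_req() or sdhci_finish_data() unmaps it.
 */
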
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data);
}

static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = sdhci_do_get_cd(host);

	spin_lock_irqsave(&host->lock, flags);

	/* Check host->mrq first in case we are runtime suspended */
	if (host->mrq && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.select_drive_strength		= sdhci_select_drive_strength,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If this tasklet gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
	    ((mrq->cmd && mrq->cmd->error) ||
	     (mrq->sbc && mrq->sbc->error) ||
	     (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
			    (mrq->data->stop && mrq->data->stop->error))) ||
	     (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
	sdhci_runtime_pm_put(host);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
				&& !host->busy_handle) {
			/* Mark that command complete before busy is ended */
			host->busy_handle = 1;
			return;
		}

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	} else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
		   host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
		*mask &= ~SDHCI_INT_DATA_END;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
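/*
 * Dump the ADMA descriptor table for post-mortem debugging; only built
 * when CONFIG_MMC_DEBUG is enabled, otherwise it compiles to a stub.
 */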
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
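/*
 * Decode data-related interrupt bits: tuning completion, busy-end
 * signalling, PIO transfers, SDMA boundary restarts and data errors.
 */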
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	BUG_ON(intmask == 0);

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->cmd->error = -ETIMEDOUT;
				tasklet_schedule(&host->finish_tasklet);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->busy_handle)
					sdhci_finish_command(host);
				else
					host->busy_handle = 1;
				return;
			}
		}

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;
			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
				mmc_hostname(host->mmc), dmastart,
				host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
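/*
 * Hardirq half of the shared, threaded interrupt handler. It acknowledges
 * and dispatches interrupt status bits under host->lock and defers card
 * detect and SDIO card interrupts to sdhci_thread_irq().
 */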
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		DBG("*** %s got interrupt: 0x%08x\n",
			mmc_hostname(host->mmc), intmask);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
				      &intmask);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
				mmc_hostname(host->mmc));

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}

		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
			   mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
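/*
 * Threaded half of the interrupt handler: runs with interrupts enabled
 * and handles the slow paths flagged in host->thread_isr by sdhci_irq().
 */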
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_card_event(host->mmc);
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
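/*
 * Program the Wakeup Control register so that card insert/remove and SDIO
 * card interrupts can wake the system while the host is suspended.
 */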
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		sdhci_do_set_ios(host, &host->mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
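/*
 * Runtime PM helpers: thin wrappers around the pm_runtime calls on the
 * parent device. The bus_on/bus_off pair additionally holds a runtime PM
 * usage reference while bus power is on, preventing runtime suspend with
 * the bus powered.
 */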
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return pm_runtime_get_sync(host->mmc->parent);
}

static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	pm_runtime_mark_last_busy(host->mmc->parent);
	return pm_runtime_put_autosuspend(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	/* Force clock and power re-program */
	host->pwr = 0;
	host->clock = 0;
	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
	sdhci_do_set_ios(host, &host->mmc->ios);

	if ((host_flags & SDHCI_PV_ENABLED) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
		spin_lock_irqsave(&host->lock, flags);
		sdhci_enable_preset_value(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
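/*
 * Allocate an mmc_host together with an sdhci_host, reserving priv_size
 * extra bytes for the caller's private data.
 */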
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
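/*
 * Read out the controller capabilities, choose a transfer mode
 * (ADMA/SDMA/PIO), derive the host parameters and register the host with
 * the MMC core.
 */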
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 caps[2] = {0, 0};
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	if (debug_quirks)
		host->quirks = debug_quirks;
	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	override_timeout_clk = host->timeout_clk;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
		sdhci_readl(host, SDHCI_CAPABILITIES);

	if (host->version >= SDHCI_SPEC_300)
		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
			host->caps1 :
			sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
		(caps[0] & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
					mmc_hostname(mmc));
				host->flags &=
					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
			}
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end
		 * descriptor, all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
						      host->adma_table_sz,
						      &host->adma_addr,
						      GFP_KERNEL);
		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
		if (!host->adma_table || !host->align_buffer) {
			if (host->adma_table)
				dma_free_coherent(mmc_dev(mmc),
						  host->adma_table_sz,
						  host->adma_table,
						  host->adma_addr);
			kfree(host->align_buffer);
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			host->adma_table = NULL;
			host->align_buffer = NULL;
		} else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
					  host->adma_table, host->adma_addr);
			kfree(host->align_buffer);
			host->adma_table = NULL;
			host->align_buffer = NULL;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}
	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;
		if (host->timeout_clk == 0) {
			if (host->ops->get_timeout_clock) {
				host->timeout_clk =
					host->ops->get_timeout_clock(host);
			} else {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				return -ENODEV;
			}
		}

		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
	} else {
		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (caps[0] & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If there are external regulators, get them */
	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
					SDHCI_SUPPORT_SDR50 |
					SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			     SDHCI_SUPPORT_DDR50);

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		       SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (caps[1] & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
		mmc->caps |= MMC_CAP_UHS_SDR50;

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (caps[1] & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (caps[1] & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Does the host need tuning for SDR104 / HS200? */
	if (mmc->caps2 & MMC_CAP2_HS200)
		host->flags |= SDHCI_SDR104_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (caps[1] & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps[1] & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps[1] & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (caps[0] & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (caps[0] & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);
	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif

	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
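/*
 * Tear-down counterpart of sdhci_add_host(). When @dead is set the
 * hardware is assumed to be inaccessible, so the full controller reset is
 * skipped and any in-flight request is failed with -ENOMEDIUM.
 */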
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
				mmc_hostname(mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->adma_table)
		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
				  host->adma_table, host->adma_addr);
	kfree(host->align_buffer);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");