/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
19 #include <linux/module.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h>
22 #include <linux/scatterlist.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
26 #include <linux/leds.h>
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/slot-gpio.h>
35 #define DRIVER_NAME "sdhci"
37 #define DBG(f, x...) \
38 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
40 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
41 defined(CONFIG_MMC_SDHCI_MODULE))
42 #define SDHCI_USE_LEDS_CLASS
45 #define MAX_TUNING_LOOP 40
47 static unsigned int debug_quirks
= 0;
48 static unsigned int debug_quirks2
;
50 static void sdhci_finish_data(struct sdhci_host
*);
52 static void sdhci_finish_command(struct sdhci_host
*);
53 static int sdhci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
);
54 static void sdhci_tuning_timer(unsigned long data
);
55 static void sdhci_enable_preset_value(struct sdhci_host
*host
, bool enable
);
58 static int sdhci_runtime_pm_get(struct sdhci_host
*host
);
59 static int sdhci_runtime_pm_put(struct sdhci_host
*host
);
60 static void sdhci_runtime_pm_bus_on(struct sdhci_host
*host
);
61 static void sdhci_runtime_pm_bus_off(struct sdhci_host
*host
);
63 static inline int sdhci_runtime_pm_get(struct sdhci_host
*host
)
67 static inline int sdhci_runtime_pm_put(struct sdhci_host
*host
)
71 static void sdhci_runtime_pm_bus_on(struct sdhci_host
*host
)
74 static void sdhci_runtime_pm_bus_off(struct sdhci_host
*host
)
79 static void sdhci_dumpregs(struct sdhci_host
*host
)
81 pr_debug(DRIVER_NAME
": =========== REGISTER DUMP (%s)===========\n",
82 mmc_hostname(host
->mmc
));
84 pr_debug(DRIVER_NAME
": Sys addr: 0x%08x | Version: 0x%08x\n",
85 sdhci_readl(host
, SDHCI_DMA_ADDRESS
),
86 sdhci_readw(host
, SDHCI_HOST_VERSION
));
87 pr_debug(DRIVER_NAME
": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
88 sdhci_readw(host
, SDHCI_BLOCK_SIZE
),
89 sdhci_readw(host
, SDHCI_BLOCK_COUNT
));
90 pr_debug(DRIVER_NAME
": Argument: 0x%08x | Trn mode: 0x%08x\n",
91 sdhci_readl(host
, SDHCI_ARGUMENT
),
92 sdhci_readw(host
, SDHCI_TRANSFER_MODE
));
93 pr_debug(DRIVER_NAME
": Present: 0x%08x | Host ctl: 0x%08x\n",
94 sdhci_readl(host
, SDHCI_PRESENT_STATE
),
95 sdhci_readb(host
, SDHCI_HOST_CONTROL
));
96 pr_debug(DRIVER_NAME
": Power: 0x%08x | Blk gap: 0x%08x\n",
97 sdhci_readb(host
, SDHCI_POWER_CONTROL
),
98 sdhci_readb(host
, SDHCI_BLOCK_GAP_CONTROL
));
99 pr_debug(DRIVER_NAME
": Wake-up: 0x%08x | Clock: 0x%08x\n",
100 sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
),
101 sdhci_readw(host
, SDHCI_CLOCK_CONTROL
));
102 pr_debug(DRIVER_NAME
": Timeout: 0x%08x | Int stat: 0x%08x\n",
103 sdhci_readb(host
, SDHCI_TIMEOUT_CONTROL
),
104 sdhci_readl(host
, SDHCI_INT_STATUS
));
105 pr_debug(DRIVER_NAME
": Int enab: 0x%08x | Sig enab: 0x%08x\n",
106 sdhci_readl(host
, SDHCI_INT_ENABLE
),
107 sdhci_readl(host
, SDHCI_SIGNAL_ENABLE
));
108 pr_debug(DRIVER_NAME
": AC12 err: 0x%08x | Slot int: 0x%08x\n",
109 sdhci_readw(host
, SDHCI_ACMD12_ERR
),
110 sdhci_readw(host
, SDHCI_SLOT_INT_STATUS
));
111 pr_debug(DRIVER_NAME
": Caps: 0x%08x | Caps_1: 0x%08x\n",
112 sdhci_readl(host
, SDHCI_CAPABILITIES
),
113 sdhci_readl(host
, SDHCI_CAPABILITIES_1
));
114 pr_debug(DRIVER_NAME
": Cmd: 0x%08x | Max curr: 0x%08x\n",
115 sdhci_readw(host
, SDHCI_COMMAND
),
116 sdhci_readl(host
, SDHCI_MAX_CURRENT
));
117 pr_debug(DRIVER_NAME
": Host ctl2: 0x%08x\n",
118 sdhci_readw(host
, SDHCI_HOST_CONTROL2
));
120 if (host
->flags
& SDHCI_USE_ADMA
) {
121 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
122 pr_debug(DRIVER_NAME
": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
123 readl(host
->ioaddr
+ SDHCI_ADMA_ERROR
),
124 readl(host
->ioaddr
+ SDHCI_ADMA_ADDRESS_HI
),
125 readl(host
->ioaddr
+ SDHCI_ADMA_ADDRESS
));
127 pr_debug(DRIVER_NAME
": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
128 readl(host
->ioaddr
+ SDHCI_ADMA_ERROR
),
129 readl(host
->ioaddr
+ SDHCI_ADMA_ADDRESS
));
132 pr_debug(DRIVER_NAME
": ===========================================\n");
135 /*****************************************************************************\
137 * Low level functions *
139 \*****************************************************************************/
141 static void sdhci_set_card_detection(struct sdhci_host
*host
, bool enable
)
145 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
) ||
146 (host
->mmc
->caps
& MMC_CAP_NONREMOVABLE
))
150 present
= sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
153 host
->ier
|= present
? SDHCI_INT_CARD_REMOVE
:
154 SDHCI_INT_CARD_INSERT
;
156 host
->ier
&= ~(SDHCI_INT_CARD_REMOVE
| SDHCI_INT_CARD_INSERT
);
159 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
160 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
163 static void sdhci_enable_card_detection(struct sdhci_host
*host
)
165 sdhci_set_card_detection(host
, true);
168 static void sdhci_disable_card_detection(struct sdhci_host
*host
)
170 sdhci_set_card_detection(host
, false);
173 void sdhci_reset(struct sdhci_host
*host
, u8 mask
)
175 unsigned long timeout
;
177 sdhci_writeb(host
, mask
, SDHCI_SOFTWARE_RESET
);
179 if (mask
& SDHCI_RESET_ALL
) {
181 /* Reset-all turns off SD Bus Power */
182 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
183 sdhci_runtime_pm_bus_off(host
);
186 /* Wait max 100 ms */
189 /* hw clears the bit when it's done */
190 while (sdhci_readb(host
, SDHCI_SOFTWARE_RESET
) & mask
) {
192 pr_err("%s: Reset 0x%x never completed.\n",
193 mmc_hostname(host
->mmc
), (int)mask
);
194 sdhci_dumpregs(host
);
201 EXPORT_SYMBOL_GPL(sdhci_reset
);
203 static void sdhci_do_reset(struct sdhci_host
*host
, u8 mask
)
205 if (host
->quirks
& SDHCI_QUIRK_NO_CARD_NO_RESET
) {
206 if (!(sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
211 host
->ops
->reset(host
, mask
);
213 if (mask
& SDHCI_RESET_ALL
) {
214 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
215 if (host
->ops
->enable_dma
)
216 host
->ops
->enable_dma(host
);
219 /* Resetting the controller clears many */
220 host
->preset_enabled
= false;
224 static void sdhci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
);
226 static void sdhci_init(struct sdhci_host
*host
, int soft
)
229 sdhci_do_reset(host
, SDHCI_RESET_CMD
|SDHCI_RESET_DATA
);
231 sdhci_do_reset(host
, SDHCI_RESET_ALL
);
233 host
->ier
= SDHCI_INT_BUS_POWER
| SDHCI_INT_DATA_END_BIT
|
234 SDHCI_INT_DATA_CRC
| SDHCI_INT_DATA_TIMEOUT
|
235 SDHCI_INT_INDEX
| SDHCI_INT_END_BIT
| SDHCI_INT_CRC
|
236 SDHCI_INT_TIMEOUT
| SDHCI_INT_DATA_END
|
239 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
240 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
243 /* force clock reconfiguration */
245 sdhci_set_ios(host
->mmc
, &host
->mmc
->ios
);
249 static void sdhci_reinit(struct sdhci_host
*host
)
253 * Retuning stuffs are affected by different cards inserted and only
254 * applicable to UHS-I cards. So reset these fields to their initial
255 * value when card is removed.
257 if (host
->flags
& SDHCI_USING_RETUNING_TIMER
) {
258 host
->flags
&= ~SDHCI_USING_RETUNING_TIMER
;
260 del_timer_sync(&host
->tuning_timer
);
261 host
->flags
&= ~SDHCI_NEEDS_RETUNING
;
262 host
->mmc
->max_blk_count
=
263 (host
->quirks
& SDHCI_QUIRK_NO_MULTIBLOCK
) ? 1 : 65535;
265 sdhci_enable_card_detection(host
);
268 static void sdhci_activate_led(struct sdhci_host
*host
)
272 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
273 ctrl
|= SDHCI_CTRL_LED
;
274 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
277 static void sdhci_deactivate_led(struct sdhci_host
*host
)
281 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
282 ctrl
&= ~SDHCI_CTRL_LED
;
283 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
286 #ifdef SDHCI_USE_LEDS_CLASS
287 static void sdhci_led_control(struct led_classdev
*led
,
288 enum led_brightness brightness
)
290 struct sdhci_host
*host
= container_of(led
, struct sdhci_host
, led
);
293 spin_lock_irqsave(&host
->lock
, flags
);
295 if (host
->runtime_suspended
)
298 if (brightness
== LED_OFF
)
299 sdhci_deactivate_led(host
);
301 sdhci_activate_led(host
);
303 spin_unlock_irqrestore(&host
->lock
, flags
);
307 /*****************************************************************************\
311 \*****************************************************************************/
313 static void sdhci_read_block_pio(struct sdhci_host
*host
)
316 size_t blksize
, len
, chunk
;
317 u32
uninitialized_var(scratch
);
320 DBG("PIO reading\n");
322 blksize
= host
->data
->blksz
;
325 local_irq_save(flags
);
328 if (!sg_miter_next(&host
->sg_miter
))
331 len
= min(host
->sg_miter
.length
, blksize
);
334 host
->sg_miter
.consumed
= len
;
336 buf
= host
->sg_miter
.addr
;
340 scratch
= sdhci_readl(host
, SDHCI_BUFFER
);
344 *buf
= scratch
& 0xFF;
353 sg_miter_stop(&host
->sg_miter
);
355 local_irq_restore(flags
);
358 static void sdhci_write_block_pio(struct sdhci_host
*host
)
361 size_t blksize
, len
, chunk
;
365 DBG("PIO writing\n");
367 blksize
= host
->data
->blksz
;
371 local_irq_save(flags
);
374 if (!sg_miter_next(&host
->sg_miter
))
377 len
= min(host
->sg_miter
.length
, blksize
);
380 host
->sg_miter
.consumed
= len
;
382 buf
= host
->sg_miter
.addr
;
385 scratch
|= (u32
)*buf
<< (chunk
* 8);
391 if ((chunk
== 4) || ((len
== 0) && (blksize
== 0))) {
392 sdhci_writel(host
, scratch
, SDHCI_BUFFER
);
399 sg_miter_stop(&host
->sg_miter
);
401 local_irq_restore(flags
);
404 static void sdhci_transfer_pio(struct sdhci_host
*host
)
410 if (host
->blocks
== 0)
413 if (host
->data
->flags
& MMC_DATA_READ
)
414 mask
= SDHCI_DATA_AVAILABLE
;
416 mask
= SDHCI_SPACE_AVAILABLE
;
419 * Some controllers (JMicron JMB38x) mess up the buffer bits
420 * for transfers < 4 bytes. As long as it is just one block,
421 * we can ignore the bits.
423 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_SMALL_PIO
) &&
424 (host
->data
->blocks
== 1))
427 while (sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
) {
428 if (host
->quirks
& SDHCI_QUIRK_PIO_NEEDS_DELAY
)
431 if (host
->data
->flags
& MMC_DATA_READ
)
432 sdhci_read_block_pio(host
);
434 sdhci_write_block_pio(host
);
437 if (host
->blocks
== 0)
441 DBG("PIO transfer complete.\n");
444 static char *sdhci_kmap_atomic(struct scatterlist
*sg
, unsigned long *flags
)
446 local_irq_save(*flags
);
447 return kmap_atomic(sg_page(sg
)) + sg
->offset
;
/* Undo sdhci_kmap_atomic(): drop the mapping, then restore IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
456 static void sdhci_adma_write_desc(struct sdhci_host
*host
, void *desc
,
457 dma_addr_t addr
, int len
, unsigned cmd
)
459 struct sdhci_adma2_64_desc
*dma_desc
= desc
;
461 /* 32-bit and 64-bit descriptors have these members in same position */
462 dma_desc
->cmd
= cpu_to_le16(cmd
);
463 dma_desc
->len
= cpu_to_le16(len
);
464 dma_desc
->addr_lo
= cpu_to_le32((u32
)addr
);
466 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
467 dma_desc
->addr_hi
= cpu_to_le32((u64
)addr
>> 32);
470 static void sdhci_adma_mark_end(void *desc
)
472 struct sdhci_adma2_64_desc
*dma_desc
= desc
;
474 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
475 dma_desc
->cmd
|= cpu_to_le16(ADMA2_END
);
478 static int sdhci_adma_table_pre(struct sdhci_host
*host
,
479 struct mmc_data
*data
)
486 dma_addr_t align_addr
;
489 struct scatterlist
*sg
;
495 * The spec does not specify endianness of descriptor table.
496 * We currently guess that it is LE.
499 if (data
->flags
& MMC_DATA_READ
)
500 direction
= DMA_FROM_DEVICE
;
502 direction
= DMA_TO_DEVICE
;
504 host
->align_addr
= dma_map_single(mmc_dev(host
->mmc
),
505 host
->align_buffer
, host
->align_buffer_sz
, direction
);
506 if (dma_mapping_error(mmc_dev(host
->mmc
), host
->align_addr
))
508 BUG_ON(host
->align_addr
& host
->align_mask
);
510 host
->sg_count
= dma_map_sg(mmc_dev(host
->mmc
),
511 data
->sg
, data
->sg_len
, direction
);
512 if (host
->sg_count
== 0)
515 desc
= host
->adma_table
;
516 align
= host
->align_buffer
;
518 align_addr
= host
->align_addr
;
520 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
) {
521 addr
= sg_dma_address(sg
);
522 len
= sg_dma_len(sg
);
525 * The SDHCI specification states that ADMA
526 * addresses must be 32-bit aligned. If they
527 * aren't, then we use a bounce buffer for
528 * the (up to three) bytes that screw up the
531 offset
= (host
->align_sz
- (addr
& host
->align_mask
)) &
534 if (data
->flags
& MMC_DATA_WRITE
) {
535 buffer
= sdhci_kmap_atomic(sg
, &flags
);
536 WARN_ON(((long)buffer
& (PAGE_SIZE
- 1)) >
537 (PAGE_SIZE
- offset
));
538 memcpy(align
, buffer
, offset
);
539 sdhci_kunmap_atomic(buffer
, &flags
);
543 sdhci_adma_write_desc(host
, desc
, align_addr
, offset
,
546 BUG_ON(offset
> 65536);
548 align
+= host
->align_sz
;
549 align_addr
+= host
->align_sz
;
551 desc
+= host
->desc_sz
;
560 sdhci_adma_write_desc(host
, desc
, addr
, len
, ADMA2_TRAN_VALID
);
561 desc
+= host
->desc_sz
;
564 * If this triggers then we have a calculation bug
567 WARN_ON((desc
- host
->adma_table
) >= host
->adma_table_sz
);
570 if (host
->quirks
& SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
) {
572 * Mark the last descriptor as the terminating descriptor
574 if (desc
!= host
->adma_table
) {
575 desc
-= host
->desc_sz
;
576 sdhci_adma_mark_end(desc
);
580 * Add a terminating entry.
583 /* nop, end, valid */
584 sdhci_adma_write_desc(host
, desc
, 0, 0, ADMA2_NOP_END_VALID
);
588 * Resync align buffer as we might have changed it.
590 if (data
->flags
& MMC_DATA_WRITE
) {
591 dma_sync_single_for_device(mmc_dev(host
->mmc
),
592 host
->align_addr
, host
->align_buffer_sz
, direction
);
598 dma_unmap_single(mmc_dev(host
->mmc
), host
->align_addr
,
599 host
->align_buffer_sz
, direction
);
604 static void sdhci_adma_table_post(struct sdhci_host
*host
,
605 struct mmc_data
*data
)
609 struct scatterlist
*sg
;
616 if (data
->flags
& MMC_DATA_READ
)
617 direction
= DMA_FROM_DEVICE
;
619 direction
= DMA_TO_DEVICE
;
621 dma_unmap_single(mmc_dev(host
->mmc
), host
->align_addr
,
622 host
->align_buffer_sz
, direction
);
624 /* Do a quick scan of the SG list for any unaligned mappings */
625 has_unaligned
= false;
626 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
)
627 if (sg_dma_address(sg
) & host
->align_mask
) {
628 has_unaligned
= true;
632 if (has_unaligned
&& data
->flags
& MMC_DATA_READ
) {
633 dma_sync_sg_for_cpu(mmc_dev(host
->mmc
), data
->sg
,
634 data
->sg_len
, direction
);
636 align
= host
->align_buffer
;
638 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
) {
639 if (sg_dma_address(sg
) & host
->align_mask
) {
640 size
= host
->align_sz
-
641 (sg_dma_address(sg
) & host
->align_mask
);
643 buffer
= sdhci_kmap_atomic(sg
, &flags
);
644 WARN_ON(((long)buffer
& (PAGE_SIZE
- 1)) >
646 memcpy(buffer
, align
, size
);
647 sdhci_kunmap_atomic(buffer
, &flags
);
649 align
+= host
->align_sz
;
654 dma_unmap_sg(mmc_dev(host
->mmc
), data
->sg
,
655 data
->sg_len
, direction
);
658 static u8
sdhci_calc_timeout(struct sdhci_host
*host
, struct mmc_command
*cmd
)
661 struct mmc_data
*data
= cmd
->data
;
662 unsigned target_timeout
, current_timeout
;
665 * If the host controller provides us with an incorrect timeout
666 * value, just skip the check and use 0xE. The hardware may take
667 * longer to time out, but that's much better than having a too-short
670 if (host
->quirks
& SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
)
673 /* Unspecified timeout, assume max */
674 if (!data
&& !cmd
->busy_timeout
)
679 target_timeout
= cmd
->busy_timeout
* 1000;
681 target_timeout
= data
->timeout_ns
/ 1000;
683 target_timeout
+= data
->timeout_clks
/ host
->clock
;
687 * Figure out needed cycles.
688 * We do this in steps in order to fit inside a 32 bit int.
689 * The first step is the minimum timeout, which will have a
690 * minimum resolution of 6 bits:
691 * (1) 2^13*1000 > 2^22,
692 * (2) host->timeout_clk < 2^16
697 current_timeout
= (1 << 13) * 1000 / host
->timeout_clk
;
698 while (current_timeout
< target_timeout
) {
700 current_timeout
<<= 1;
706 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
707 mmc_hostname(host
->mmc
), count
, cmd
->opcode
);
714 static void sdhci_set_transfer_irqs(struct sdhci_host
*host
)
716 u32 pio_irqs
= SDHCI_INT_DATA_AVAIL
| SDHCI_INT_SPACE_AVAIL
;
717 u32 dma_irqs
= SDHCI_INT_DMA_END
| SDHCI_INT_ADMA_ERROR
;
719 if (host
->flags
& SDHCI_REQ_USE_DMA
)
720 host
->ier
= (host
->ier
& ~pio_irqs
) | dma_irqs
;
722 host
->ier
= (host
->ier
& ~dma_irqs
) | pio_irqs
;
724 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
725 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
728 static void sdhci_set_timeout(struct sdhci_host
*host
, struct mmc_command
*cmd
)
732 if (host
->ops
->set_timeout
) {
733 host
->ops
->set_timeout(host
, cmd
);
735 count
= sdhci_calc_timeout(host
, cmd
);
736 sdhci_writeb(host
, count
, SDHCI_TIMEOUT_CONTROL
);
740 static void sdhci_prepare_data(struct sdhci_host
*host
, struct mmc_command
*cmd
)
743 struct mmc_data
*data
= cmd
->data
;
748 if (data
|| (cmd
->flags
& MMC_RSP_BUSY
))
749 sdhci_set_timeout(host
, cmd
);
755 BUG_ON(data
->blksz
* data
->blocks
> 524288);
756 BUG_ON(data
->blksz
> host
->mmc
->max_blk_size
);
757 BUG_ON(data
->blocks
> 65535);
760 host
->data_early
= 0;
761 host
->data
->bytes_xfered
= 0;
763 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
))
764 host
->flags
|= SDHCI_REQ_USE_DMA
;
767 * FIXME: This doesn't account for merging when mapping the
770 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
772 struct scatterlist
*sg
;
775 if (host
->flags
& SDHCI_USE_ADMA
) {
776 if (host
->quirks
& SDHCI_QUIRK_32BIT_ADMA_SIZE
)
779 if (host
->quirks
& SDHCI_QUIRK_32BIT_DMA_SIZE
)
783 if (unlikely(broken
)) {
784 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
785 if (sg
->length
& 0x3) {
786 DBG("Reverting to PIO because of "
787 "transfer size (%d)\n",
789 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
797 * The assumption here being that alignment is the same after
798 * translation to device address space.
800 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
802 struct scatterlist
*sg
;
805 if (host
->flags
& SDHCI_USE_ADMA
) {
807 * As we use 3 byte chunks to work around
808 * alignment problems, we need to check this
811 if (host
->quirks
& SDHCI_QUIRK_32BIT_ADMA_SIZE
)
814 if (host
->quirks
& SDHCI_QUIRK_32BIT_DMA_ADDR
)
818 if (unlikely(broken
)) {
819 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
820 if (sg
->offset
& 0x3) {
821 DBG("Reverting to PIO because of "
823 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
830 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
831 if (host
->flags
& SDHCI_USE_ADMA
) {
832 ret
= sdhci_adma_table_pre(host
, data
);
835 * This only happens when someone fed
836 * us an invalid request.
839 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
841 sdhci_writel(host
, host
->adma_addr
,
843 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
845 (u64
)host
->adma_addr
>> 32,
846 SDHCI_ADMA_ADDRESS_HI
);
851 sg_cnt
= dma_map_sg(mmc_dev(host
->mmc
),
852 data
->sg
, data
->sg_len
,
853 (data
->flags
& MMC_DATA_READ
) ?
858 * This only happens when someone fed
859 * us an invalid request.
862 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
864 WARN_ON(sg_cnt
!= 1);
865 sdhci_writel(host
, sg_dma_address(data
->sg
),
872 * Always adjust the DMA selection as some controllers
873 * (e.g. JMicron) can't do PIO properly when the selection
876 if (host
->version
>= SDHCI_SPEC_200
) {
877 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
878 ctrl
&= ~SDHCI_CTRL_DMA_MASK
;
879 if ((host
->flags
& SDHCI_REQ_USE_DMA
) &&
880 (host
->flags
& SDHCI_USE_ADMA
)) {
881 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
882 ctrl
|= SDHCI_CTRL_ADMA64
;
884 ctrl
|= SDHCI_CTRL_ADMA32
;
886 ctrl
|= SDHCI_CTRL_SDMA
;
888 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
891 if (!(host
->flags
& SDHCI_REQ_USE_DMA
)) {
894 flags
= SG_MITER_ATOMIC
;
895 if (host
->data
->flags
& MMC_DATA_READ
)
896 flags
|= SG_MITER_TO_SG
;
898 flags
|= SG_MITER_FROM_SG
;
899 sg_miter_start(&host
->sg_miter
, data
->sg
, data
->sg_len
, flags
);
900 host
->blocks
= data
->blocks
;
903 sdhci_set_transfer_irqs(host
);
905 /* Set the DMA boundary value and block size */
906 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG
,
907 data
->blksz
), SDHCI_BLOCK_SIZE
);
908 sdhci_writew(host
, data
->blocks
, SDHCI_BLOCK_COUNT
);
911 static void sdhci_set_transfer_mode(struct sdhci_host
*host
,
912 struct mmc_command
*cmd
)
915 struct mmc_data
*data
= cmd
->data
;
919 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD
) {
920 sdhci_writew(host
, 0x0, SDHCI_TRANSFER_MODE
);
922 /* clear Auto CMD settings for no data CMDs */
923 mode
= sdhci_readw(host
, SDHCI_TRANSFER_MODE
);
924 sdhci_writew(host
, mode
& ~(SDHCI_TRNS_AUTO_CMD12
|
925 SDHCI_TRNS_AUTO_CMD23
), SDHCI_TRANSFER_MODE
);
930 WARN_ON(!host
->data
);
932 mode
= SDHCI_TRNS_BLK_CNT_EN
;
933 if (mmc_op_multi(cmd
->opcode
) || data
->blocks
> 1) {
934 mode
|= SDHCI_TRNS_MULTI
;
936 * If we are sending CMD23, CMD12 never gets sent
937 * on successful completion (so no Auto-CMD12).
939 if (!host
->mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD12
))
940 mode
|= SDHCI_TRNS_AUTO_CMD12
;
941 else if (host
->mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD23
)) {
942 mode
|= SDHCI_TRNS_AUTO_CMD23
;
943 sdhci_writel(host
, host
->mrq
->sbc
->arg
, SDHCI_ARGUMENT2
);
947 if (data
->flags
& MMC_DATA_READ
)
948 mode
|= SDHCI_TRNS_READ
;
949 if (host
->flags
& SDHCI_REQ_USE_DMA
)
950 mode
|= SDHCI_TRNS_DMA
;
952 sdhci_writew(host
, mode
, SDHCI_TRANSFER_MODE
);
955 static void sdhci_finish_data(struct sdhci_host
*host
)
957 struct mmc_data
*data
;
964 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
965 if (host
->flags
& SDHCI_USE_ADMA
)
966 sdhci_adma_table_post(host
, data
);
968 dma_unmap_sg(mmc_dev(host
->mmc
), data
->sg
,
969 data
->sg_len
, (data
->flags
& MMC_DATA_READ
) ?
970 DMA_FROM_DEVICE
: DMA_TO_DEVICE
);
975 * The specification states that the block count register must
976 * be updated, but it does not specify at what point in the
977 * data flow. That makes the register entirely useless to read
978 * back so we have to assume that nothing made it to the card
979 * in the event of an error.
982 data
->bytes_xfered
= 0;
984 data
->bytes_xfered
= data
->blksz
* data
->blocks
;
987 * Need to send CMD12 if -
988 * a) open-ended multiblock transfer (no CMD23)
989 * b) error in multiblock transfer
996 * The controller needs a reset of internal state machines
997 * upon error conditions.
1000 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
1001 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
1004 sdhci_send_command(host
, data
->stop
);
1006 tasklet_schedule(&host
->finish_tasklet
);
1009 void sdhci_send_command(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1013 unsigned long timeout
;
1017 /* Wait max 10 ms */
1020 mask
= SDHCI_CMD_INHIBIT
;
1021 if ((cmd
->data
!= NULL
) || (cmd
->flags
& MMC_RSP_BUSY
))
1022 mask
|= SDHCI_DATA_INHIBIT
;
1024 /* We shouldn't wait for data inihibit for stop commands, even
1025 though they might use busy signaling */
1026 if (host
->mrq
->data
&& (cmd
== host
->mrq
->data
->stop
))
1027 mask
&= ~SDHCI_DATA_INHIBIT
;
1029 while (sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
) {
1031 pr_err("%s: Controller never released "
1032 "inhibit bit(s).\n", mmc_hostname(host
->mmc
));
1033 sdhci_dumpregs(host
);
1035 tasklet_schedule(&host
->finish_tasklet
);
1043 if (!cmd
->data
&& cmd
->busy_timeout
> 9000)
1044 timeout
+= DIV_ROUND_UP(cmd
->busy_timeout
, 1000) * HZ
+ HZ
;
1047 mod_timer(&host
->timer
, timeout
);
1050 host
->busy_handle
= 0;
1052 sdhci_prepare_data(host
, cmd
);
1054 sdhci_writel(host
, cmd
->arg
, SDHCI_ARGUMENT
);
1056 sdhci_set_transfer_mode(host
, cmd
);
1058 if ((cmd
->flags
& MMC_RSP_136
) && (cmd
->flags
& MMC_RSP_BUSY
)) {
1059 pr_err("%s: Unsupported response type!\n",
1060 mmc_hostname(host
->mmc
));
1061 cmd
->error
= -EINVAL
;
1062 tasklet_schedule(&host
->finish_tasklet
);
1066 if (!(cmd
->flags
& MMC_RSP_PRESENT
))
1067 flags
= SDHCI_CMD_RESP_NONE
;
1068 else if (cmd
->flags
& MMC_RSP_136
)
1069 flags
= SDHCI_CMD_RESP_LONG
;
1070 else if (cmd
->flags
& MMC_RSP_BUSY
)
1071 flags
= SDHCI_CMD_RESP_SHORT_BUSY
;
1073 flags
= SDHCI_CMD_RESP_SHORT
;
1075 if (cmd
->flags
& MMC_RSP_CRC
)
1076 flags
|= SDHCI_CMD_CRC
;
1077 if (cmd
->flags
& MMC_RSP_OPCODE
)
1078 flags
|= SDHCI_CMD_INDEX
;
1080 /* CMD19 is special in that the Data Present Select should be set */
1081 if (cmd
->data
|| cmd
->opcode
== MMC_SEND_TUNING_BLOCK
||
1082 cmd
->opcode
== MMC_SEND_TUNING_BLOCK_HS200
)
1083 flags
|= SDHCI_CMD_DATA
;
1085 sdhci_writew(host
, SDHCI_MAKE_CMD(cmd
->opcode
, flags
), SDHCI_COMMAND
);
1087 EXPORT_SYMBOL_GPL(sdhci_send_command
);
1089 static void sdhci_finish_command(struct sdhci_host
*host
)
1093 BUG_ON(host
->cmd
== NULL
);
1095 if (host
->cmd
->flags
& MMC_RSP_PRESENT
) {
1096 if (host
->cmd
->flags
& MMC_RSP_136
) {
1097 /* CRC is stripped so we need to do some shifting. */
1098 for (i
= 0;i
< 4;i
++) {
1099 host
->cmd
->resp
[i
] = sdhci_readl(host
,
1100 SDHCI_RESPONSE
+ (3-i
)*4) << 8;
1102 host
->cmd
->resp
[i
] |=
1104 SDHCI_RESPONSE
+ (3-i
)*4-1);
1107 host
->cmd
->resp
[0] = sdhci_readl(host
, SDHCI_RESPONSE
);
1111 host
->cmd
->error
= 0;
1113 /* Finished CMD23, now send actual command. */
1114 if (host
->cmd
== host
->mrq
->sbc
) {
1116 sdhci_send_command(host
, host
->mrq
->cmd
);
1119 /* Processed actual command. */
1120 if (host
->data
&& host
->data_early
)
1121 sdhci_finish_data(host
);
1123 if (!host
->cmd
->data
)
1124 tasklet_schedule(&host
->finish_tasklet
);
1130 static u16
sdhci_get_preset_value(struct sdhci_host
*host
)
1134 switch (host
->timing
) {
1135 case MMC_TIMING_UHS_SDR12
:
1136 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR12
);
1138 case MMC_TIMING_UHS_SDR25
:
1139 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR25
);
1141 case MMC_TIMING_UHS_SDR50
:
1142 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR50
);
1144 case MMC_TIMING_UHS_SDR104
:
1145 case MMC_TIMING_MMC_HS200
:
1146 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR104
);
1148 case MMC_TIMING_UHS_DDR50
:
1149 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_DDR50
);
1151 case MMC_TIMING_MMC_HS400
:
1152 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_HS400
);
1155 pr_warn("%s: Invalid UHS-I mode selected\n",
1156 mmc_hostname(host
->mmc
));
1157 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR12
);
1163 void sdhci_set_clock(struct sdhci_host
*host
, unsigned int clock
)
1165 int div
= 0; /* Initialized for compiler warning */
1166 int real_div
= div
, clk_mul
= 1;
1168 unsigned long timeout
;
1170 host
->mmc
->actual_clock
= 0;
1172 sdhci_writew(host
, 0, SDHCI_CLOCK_CONTROL
);
1177 if (host
->version
>= SDHCI_SPEC_300
) {
1178 if (host
->preset_enabled
) {
1181 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1182 pre_val
= sdhci_get_preset_value(host
);
1183 div
= (pre_val
& SDHCI_PRESET_SDCLK_FREQ_MASK
)
1184 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT
;
1185 if (host
->clk_mul
&&
1186 (pre_val
& SDHCI_PRESET_CLKGEN_SEL_MASK
)) {
1187 clk
= SDHCI_PROG_CLOCK_MODE
;
1189 clk_mul
= host
->clk_mul
;
1191 real_div
= max_t(int, 1, div
<< 1);
1197 * Check if the Host Controller supports Programmable Clock
1200 if (host
->clk_mul
) {
1201 for (div
= 1; div
<= 1024; div
++) {
1202 if ((host
->max_clk
* host
->clk_mul
/ div
)
1207 * Set Programmable Clock Mode in the Clock
1210 clk
= SDHCI_PROG_CLOCK_MODE
;
1212 clk_mul
= host
->clk_mul
;
1215 /* Version 3.00 divisors must be a multiple of 2. */
1216 if (host
->max_clk
<= clock
)
1219 for (div
= 2; div
< SDHCI_MAX_DIV_SPEC_300
;
1221 if ((host
->max_clk
/ div
) <= clock
)
1229 /* Version 2.00 divisors must be a power of 2. */
1230 for (div
= 1; div
< SDHCI_MAX_DIV_SPEC_200
; div
*= 2) {
1231 if ((host
->max_clk
/ div
) <= clock
)
1240 host
->mmc
->actual_clock
= (host
->max_clk
* clk_mul
) / real_div
;
1241 clk
|= (div
& SDHCI_DIV_MASK
) << SDHCI_DIVIDER_SHIFT
;
1242 clk
|= ((div
& SDHCI_DIV_HI_MASK
) >> SDHCI_DIV_MASK_LEN
)
1243 << SDHCI_DIVIDER_HI_SHIFT
;
1244 clk
|= SDHCI_CLOCK_INT_EN
;
1245 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1247 /* Wait max 20 ms */
1249 while (!((clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
))
1250 & SDHCI_CLOCK_INT_STABLE
)) {
1252 pr_err("%s: Internal clock never "
1253 "stabilised.\n", mmc_hostname(host
->mmc
));
1254 sdhci_dumpregs(host
);
1261 clk
|= SDHCI_CLOCK_CARD_EN
;
1262 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1264 EXPORT_SYMBOL_GPL(sdhci_set_clock
);
1266 static void sdhci_set_power(struct sdhci_host
*host
, unsigned char mode
,
1269 struct mmc_host
*mmc
= host
->mmc
;
1272 if (!IS_ERR(mmc
->supply
.vmmc
)) {
1273 spin_unlock_irq(&host
->lock
);
1274 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, vdd
);
1275 spin_lock_irq(&host
->lock
);
1279 if (mode
!= MMC_POWER_OFF
) {
1281 case MMC_VDD_165_195
:
1282 pwr
= SDHCI_POWER_180
;
1286 pwr
= SDHCI_POWER_300
;
1290 pwr
= SDHCI_POWER_330
;
1297 if (host
->pwr
== pwr
)
1303 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
1304 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
1305 sdhci_runtime_pm_bus_off(host
);
1309 * Spec says that we should clear the power reg before setting
1310 * a new value. Some controllers don't seem to like this though.
1312 if (!(host
->quirks
& SDHCI_QUIRK_SINGLE_POWER_WRITE
))
1313 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
1316 * At least the Marvell CaFe chip gets confused if we set the
1317 * voltage and set turn on power at the same time, so set the
1320 if (host
->quirks
& SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER
)
1321 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
1323 pwr
|= SDHCI_POWER_ON
;
1325 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
1327 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
1328 sdhci_runtime_pm_bus_on(host
);
1331 * Some controllers need an extra 10ms delay of 10ms before
1332 * they can apply clock after applying power
1334 if (host
->quirks
& SDHCI_QUIRK_DELAY_AFTER_POWER
)
1339 /*****************************************************************************\
1343 \*****************************************************************************/
1345 static void sdhci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
1347 struct sdhci_host
*host
;
1349 unsigned long flags
;
1352 host
= mmc_priv(mmc
);
1354 sdhci_runtime_pm_get(host
);
1356 spin_lock_irqsave(&host
->lock
, flags
);
1358 WARN_ON(host
->mrq
!= NULL
);
1360 #ifndef SDHCI_USE_LEDS_CLASS
1361 sdhci_activate_led(host
);
1365 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1366 * requests if Auto-CMD12 is enabled.
1368 if (!mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD12
)) {
1370 mrq
->data
->stop
= NULL
;
1378 * Firstly check card presence from cd-gpio. The return could
1379 * be one of the following possibilities:
1380 * negative: cd-gpio is not available
1381 * zero: cd-gpio is used, and card is removed
1382 * one: cd-gpio is used, and card is present
1384 present
= mmc_gpio_get_cd(host
->mmc
);
1386 /* If polling, assume that the card is always present. */
1387 if (host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
)
1390 present
= sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
1394 if (!present
|| host
->flags
& SDHCI_DEVICE_DEAD
) {
1395 host
->mrq
->cmd
->error
= -ENOMEDIUM
;
1396 tasklet_schedule(&host
->finish_tasklet
);
1400 present_state
= sdhci_readl(host
, SDHCI_PRESENT_STATE
);
1402 * Check if the re-tuning timer has already expired and there
1403 * is no on-going data transfer and DAT0 is not busy. If so,
1404 * we need to execute tuning procedure before sending command.
1406 if ((host
->flags
& SDHCI_NEEDS_RETUNING
) &&
1407 !(present_state
& (SDHCI_DOING_WRITE
| SDHCI_DOING_READ
)) &&
1408 (present_state
& SDHCI_DATA_0_LVL_MASK
)) {
1410 /* eMMC uses cmd21 but sd and sdio use cmd19 */
1412 mmc
->card
->type
== MMC_TYPE_MMC
?
1413 MMC_SEND_TUNING_BLOCK_HS200
:
1414 MMC_SEND_TUNING_BLOCK
;
1416 /* Here we need to set the host->mrq to NULL,
1417 * in case the pending finish_tasklet
1418 * finishes it incorrectly.
1422 spin_unlock_irqrestore(&host
->lock
, flags
);
1423 sdhci_execute_tuning(mmc
, tuning_opcode
);
1424 spin_lock_irqsave(&host
->lock
, flags
);
1426 /* Restore original mmc_request structure */
1431 if (mrq
->sbc
&& !(host
->flags
& SDHCI_AUTO_CMD23
))
1432 sdhci_send_command(host
, mrq
->sbc
);
1434 sdhci_send_command(host
, mrq
->cmd
);
1438 spin_unlock_irqrestore(&host
->lock
, flags
);
1441 void sdhci_set_bus_width(struct sdhci_host
*host
, int width
)
1445 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
1446 if (width
== MMC_BUS_WIDTH_8
) {
1447 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
1448 if (host
->version
>= SDHCI_SPEC_300
)
1449 ctrl
|= SDHCI_CTRL_8BITBUS
;
1451 if (host
->version
>= SDHCI_SPEC_300
)
1452 ctrl
&= ~SDHCI_CTRL_8BITBUS
;
1453 if (width
== MMC_BUS_WIDTH_4
)
1454 ctrl
|= SDHCI_CTRL_4BITBUS
;
1456 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
1458 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1460 EXPORT_SYMBOL_GPL(sdhci_set_bus_width
);
1462 void sdhci_set_uhs_signaling(struct sdhci_host
*host
, unsigned timing
)
1466 ctrl_2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1467 /* Select Bus Speed Mode for host */
1468 ctrl_2
&= ~SDHCI_CTRL_UHS_MASK
;
1469 if ((timing
== MMC_TIMING_MMC_HS200
) ||
1470 (timing
== MMC_TIMING_UHS_SDR104
))
1471 ctrl_2
|= SDHCI_CTRL_UHS_SDR104
;
1472 else if (timing
== MMC_TIMING_UHS_SDR12
)
1473 ctrl_2
|= SDHCI_CTRL_UHS_SDR12
;
1474 else if (timing
== MMC_TIMING_UHS_SDR25
)
1475 ctrl_2
|= SDHCI_CTRL_UHS_SDR25
;
1476 else if (timing
== MMC_TIMING_UHS_SDR50
)
1477 ctrl_2
|= SDHCI_CTRL_UHS_SDR50
;
1478 else if ((timing
== MMC_TIMING_UHS_DDR50
) ||
1479 (timing
== MMC_TIMING_MMC_DDR52
))
1480 ctrl_2
|= SDHCI_CTRL_UHS_DDR50
;
1481 else if (timing
== MMC_TIMING_MMC_HS400
)
1482 ctrl_2
|= SDHCI_CTRL_HS400
; /* Non-standard */
1483 sdhci_writew(host
, ctrl_2
, SDHCI_HOST_CONTROL2
);
1485 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling
);
1487 static void sdhci_do_set_ios(struct sdhci_host
*host
, struct mmc_ios
*ios
)
1489 unsigned long flags
;
1491 struct mmc_host
*mmc
= host
->mmc
;
1493 spin_lock_irqsave(&host
->lock
, flags
);
1495 if (host
->flags
& SDHCI_DEVICE_DEAD
) {
1496 spin_unlock_irqrestore(&host
->lock
, flags
);
1497 if (!IS_ERR(mmc
->supply
.vmmc
) &&
1498 ios
->power_mode
== MMC_POWER_OFF
)
1499 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, 0);
1504 * Reset the chip on each power off.
1505 * Should clear out any weird states.
1507 if (ios
->power_mode
== MMC_POWER_OFF
) {
1508 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
1512 if (host
->version
>= SDHCI_SPEC_300
&&
1513 (ios
->power_mode
== MMC_POWER_UP
) &&
1514 !(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
))
1515 sdhci_enable_preset_value(host
, false);
1517 if (!ios
->clock
|| ios
->clock
!= host
->clock
) {
1518 host
->ops
->set_clock(host
, ios
->clock
);
1519 host
->clock
= ios
->clock
;
1521 if (host
->quirks
& SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
&&
1523 host
->timeout_clk
= host
->mmc
->actual_clock
?
1524 host
->mmc
->actual_clock
/ 1000 :
1526 host
->mmc
->max_busy_timeout
=
1527 host
->ops
->get_max_timeout_count
?
1528 host
->ops
->get_max_timeout_count(host
) :
1530 host
->mmc
->max_busy_timeout
/= host
->timeout_clk
;
1534 sdhci_set_power(host
, ios
->power_mode
, ios
->vdd
);
1536 if (host
->ops
->platform_send_init_74_clocks
)
1537 host
->ops
->platform_send_init_74_clocks(host
, ios
->power_mode
);
1539 host
->ops
->set_bus_width(host
, ios
->bus_width
);
1541 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
1543 if ((ios
->timing
== MMC_TIMING_SD_HS
||
1544 ios
->timing
== MMC_TIMING_MMC_HS
)
1545 && !(host
->quirks
& SDHCI_QUIRK_NO_HISPD_BIT
))
1546 ctrl
|= SDHCI_CTRL_HISPD
;
1548 ctrl
&= ~SDHCI_CTRL_HISPD
;
1550 if (host
->version
>= SDHCI_SPEC_300
) {
1553 /* In case of UHS-I modes, set High Speed Enable */
1554 if ((ios
->timing
== MMC_TIMING_MMC_HS400
) ||
1555 (ios
->timing
== MMC_TIMING_MMC_HS200
) ||
1556 (ios
->timing
== MMC_TIMING_MMC_DDR52
) ||
1557 (ios
->timing
== MMC_TIMING_UHS_SDR50
) ||
1558 (ios
->timing
== MMC_TIMING_UHS_SDR104
) ||
1559 (ios
->timing
== MMC_TIMING_UHS_DDR50
) ||
1560 (ios
->timing
== MMC_TIMING_UHS_SDR25
))
1561 ctrl
|= SDHCI_CTRL_HISPD
;
1563 if (!host
->preset_enabled
) {
1564 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1566 * We only need to set Driver Strength if the
1567 * preset value enable is not set.
1569 ctrl_2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1570 ctrl_2
&= ~SDHCI_CTRL_DRV_TYPE_MASK
;
1571 if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_A
)
1572 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_A
;
1573 else if (ios
->drv_type
== MMC_SET_DRIVER_TYPE_C
)
1574 ctrl_2
|= SDHCI_CTRL_DRV_TYPE_C
;
1576 sdhci_writew(host
, ctrl_2
, SDHCI_HOST_CONTROL2
);
1579 * According to SDHC Spec v3.00, if the Preset Value
1580 * Enable in the Host Control 2 register is set, we
1581 * need to reset SD Clock Enable before changing High
1582 * Speed Enable to avoid generating clock gliches.
1585 /* Reset SD Clock Enable */
1586 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1587 clk
&= ~SDHCI_CLOCK_CARD_EN
;
1588 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1590 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1592 /* Re-enable SD Clock */
1593 host
->ops
->set_clock(host
, host
->clock
);
1596 /* Reset SD Clock Enable */
1597 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1598 clk
&= ~SDHCI_CLOCK_CARD_EN
;
1599 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
1601 host
->ops
->set_uhs_signaling(host
, ios
->timing
);
1602 host
->timing
= ios
->timing
;
1604 if (!(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
) &&
1605 ((ios
->timing
== MMC_TIMING_UHS_SDR12
) ||
1606 (ios
->timing
== MMC_TIMING_UHS_SDR25
) ||
1607 (ios
->timing
== MMC_TIMING_UHS_SDR50
) ||
1608 (ios
->timing
== MMC_TIMING_UHS_SDR104
) ||
1609 (ios
->timing
== MMC_TIMING_UHS_DDR50
))) {
1612 sdhci_enable_preset_value(host
, true);
1613 preset
= sdhci_get_preset_value(host
);
1614 ios
->drv_type
= (preset
& SDHCI_PRESET_DRV_MASK
)
1615 >> SDHCI_PRESET_DRV_SHIFT
;
1618 /* Re-enable SD Clock */
1619 host
->ops
->set_clock(host
, host
->clock
);
1621 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
1624 * Some (ENE) controllers go apeshit on some ios operation,
1625 * signalling timeout and CRC errors even on CMD0. Resetting
1626 * it on each ios seems to solve the problem.
1628 if(host
->quirks
& SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS
)
1629 sdhci_do_reset(host
, SDHCI_RESET_CMD
| SDHCI_RESET_DATA
);
1632 spin_unlock_irqrestore(&host
->lock
, flags
);
/*
 * mmc_host_ops .set_ios callback: thin wrapper that brackets the real work
 * in sdhci_do_set_ios() with runtime-PM get/put.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}
1644 static int sdhci_do_get_cd(struct sdhci_host
*host
)
1646 int gpio_cd
= mmc_gpio_get_cd(host
->mmc
);
1648 if (host
->flags
& SDHCI_DEVICE_DEAD
)
1651 /* If polling/nonremovable, assume that the card is always present. */
1652 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
) ||
1653 (host
->mmc
->caps
& MMC_CAP_NONREMOVABLE
))
1656 /* Try slot gpio detect */
1657 if (!IS_ERR_VALUE(gpio_cd
))
1660 /* Host native card detect */
1661 return !!(sdhci_readl(host
, SDHCI_PRESENT_STATE
) & SDHCI_CARD_PRESENT
);
/* mmc_host_ops .get_cd callback: runtime-PM wrapper for sdhci_do_get_cd(). */
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_cd(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
1675 static int sdhci_check_ro(struct sdhci_host
*host
)
1677 unsigned long flags
;
1680 spin_lock_irqsave(&host
->lock
, flags
);
1682 if (host
->flags
& SDHCI_DEVICE_DEAD
)
1684 else if (host
->ops
->get_ro
)
1685 is_readonly
= host
->ops
->get_ro(host
);
1687 is_readonly
= !(sdhci_readl(host
, SDHCI_PRESENT_STATE
)
1688 & SDHCI_WRITE_PROTECT
);
1690 spin_unlock_irqrestore(&host
->lock
, flags
);
1692 /* This quirk needs to be replaced by a callback-function later */
1693 return host
->quirks
& SDHCI_QUIRK_INVERTED_WRITE_PROTECT
?
1694 !is_readonly
: is_readonly
;
1697 #define SAMPLE_COUNT 5
1699 static int sdhci_do_get_ro(struct sdhci_host
*host
)
1703 if (!(host
->quirks
& SDHCI_QUIRK_UNSTABLE_RO_DETECT
))
1704 return sdhci_check_ro(host
);
1707 for (i
= 0; i
< SAMPLE_COUNT
; i
++) {
1708 if (sdhci_check_ro(host
)) {
1709 if (++ro_count
> SAMPLE_COUNT
/ 2)
1717 static void sdhci_hw_reset(struct mmc_host
*mmc
)
1719 struct sdhci_host
*host
= mmc_priv(mmc
);
1721 if (host
->ops
&& host
->ops
->hw_reset
)
1722 host
->ops
->hw_reset(host
);
/* mmc_host_ops .get_ro callback: runtime-PM wrapper for sdhci_do_get_ro(). */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret;

	sdhci_runtime_pm_get(host);
	ret = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);
	return ret;
}
1736 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host
*host
, int enable
)
1738 if (!(host
->flags
& SDHCI_DEVICE_DEAD
)) {
1740 host
->ier
|= SDHCI_INT_CARD_INT
;
1742 host
->ier
&= ~SDHCI_INT_CARD_INT
;
1744 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
1745 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
1750 static void sdhci_enable_sdio_irq(struct mmc_host
*mmc
, int enable
)
1752 struct sdhci_host
*host
= mmc_priv(mmc
);
1753 unsigned long flags
;
1755 sdhci_runtime_pm_get(host
);
1757 spin_lock_irqsave(&host
->lock
, flags
);
1759 host
->flags
|= SDHCI_SDIO_IRQ_ENABLED
;
1761 host
->flags
&= ~SDHCI_SDIO_IRQ_ENABLED
;
1763 sdhci_enable_sdio_irq_nolock(host
, enable
);
1764 spin_unlock_irqrestore(&host
->lock
, flags
);
1766 sdhci_runtime_pm_put(host
);
1769 static int sdhci_do_start_signal_voltage_switch(struct sdhci_host
*host
,
1770 struct mmc_ios
*ios
)
1772 struct mmc_host
*mmc
= host
->mmc
;
1777 * Signal Voltage Switching is only applicable for Host Controllers
1780 if (host
->version
< SDHCI_SPEC_300
)
1783 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1785 switch (ios
->signal_voltage
) {
1786 case MMC_SIGNAL_VOLTAGE_330
:
1787 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1788 ctrl
&= ~SDHCI_CTRL_VDD_180
;
1789 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
1791 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1792 ret
= regulator_set_voltage(mmc
->supply
.vqmmc
, 2700000,
1795 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1801 usleep_range(5000, 5500);
1803 /* 3.3V regulator output should be stable within 5 ms */
1804 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1805 if (!(ctrl
& SDHCI_CTRL_VDD_180
))
1808 pr_warn("%s: 3.3V regulator output did not became stable\n",
1812 case MMC_SIGNAL_VOLTAGE_180
:
1813 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1814 ret
= regulator_set_voltage(mmc
->supply
.vqmmc
,
1817 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1824 * Enable 1.8V Signal Enable in the Host Control2
1827 ctrl
|= SDHCI_CTRL_VDD_180
;
1828 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
1830 /* 1.8V regulator output should be stable within 5 ms */
1831 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1832 if (ctrl
& SDHCI_CTRL_VDD_180
)
1835 pr_warn("%s: 1.8V regulator output did not became stable\n",
1839 case MMC_SIGNAL_VOLTAGE_120
:
1840 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1841 ret
= regulator_set_voltage(mmc
->supply
.vqmmc
, 1100000,
1844 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1851 /* No signal voltage switch required */
1856 static int sdhci_start_signal_voltage_switch(struct mmc_host
*mmc
,
1857 struct mmc_ios
*ios
)
1859 struct sdhci_host
*host
= mmc_priv(mmc
);
1862 if (host
->version
< SDHCI_SPEC_300
)
1864 sdhci_runtime_pm_get(host
);
1865 err
= sdhci_do_start_signal_voltage_switch(host
, ios
);
1866 sdhci_runtime_pm_put(host
);
1870 static int sdhci_card_busy(struct mmc_host
*mmc
)
1872 struct sdhci_host
*host
= mmc_priv(mmc
);
1875 sdhci_runtime_pm_get(host
);
1876 /* Check whether DAT[3:0] is 0000 */
1877 present_state
= sdhci_readl(host
, SDHCI_PRESENT_STATE
);
1878 sdhci_runtime_pm_put(host
);
1880 return !(present_state
& SDHCI_DATA_LVL_MASK
);
1883 static int sdhci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
)
1885 struct sdhci_host
*host
= mmc_priv(mmc
);
1887 int tuning_loop_counter
= MAX_TUNING_LOOP
;
1889 unsigned long flags
;
1891 sdhci_runtime_pm_get(host
);
1892 spin_lock_irqsave(&host
->lock
, flags
);
1895 * The Host Controller needs tuning only in case of SDR104 mode
1896 * and for SDR50 mode when Use Tuning for SDR50 is set in the
1897 * Capabilities register.
1898 * If the Host Controller supports the HS200 mode then the
1899 * tuning function has to be executed.
1901 switch (host
->timing
) {
1902 case MMC_TIMING_MMC_HS400
:
1903 case MMC_TIMING_MMC_HS200
:
1904 case MMC_TIMING_UHS_SDR104
:
1907 case MMC_TIMING_UHS_SDR50
:
1908 if (host
->flags
& SDHCI_SDR50_NEEDS_TUNING
||
1909 host
->flags
& SDHCI_SDR104_NEEDS_TUNING
)
1914 spin_unlock_irqrestore(&host
->lock
, flags
);
1915 sdhci_runtime_pm_put(host
);
1919 if (host
->ops
->platform_execute_tuning
) {
1920 spin_unlock_irqrestore(&host
->lock
, flags
);
1921 err
= host
->ops
->platform_execute_tuning(host
, opcode
);
1922 sdhci_runtime_pm_put(host
);
1926 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1927 ctrl
|= SDHCI_CTRL_EXEC_TUNING
;
1928 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
1931 * As per the Host Controller spec v3.00, tuning command
1932 * generates Buffer Read Ready interrupt, so enable that.
1934 * Note: The spec clearly says that when tuning sequence
1935 * is being performed, the controller does not generate
1936 * interrupts other than Buffer Read Ready interrupt. But
1937 * to make sure we don't hit a controller bug, we _only_
1938 * enable Buffer Read Ready interrupt here.
1940 sdhci_writel(host
, SDHCI_INT_DATA_AVAIL
, SDHCI_INT_ENABLE
);
1941 sdhci_writel(host
, SDHCI_INT_DATA_AVAIL
, SDHCI_SIGNAL_ENABLE
);
1944 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
1945 * of loops reaches 40 times or a timeout of 150ms occurs.
1948 struct mmc_command cmd
= {0};
1949 struct mmc_request mrq
= {NULL
};
1951 cmd
.opcode
= opcode
;
1953 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
1958 if (tuning_loop_counter
-- == 0)
1965 * In response to CMD19, the card sends 64 bytes of tuning
1966 * block to the Host Controller. So we set the block size
1969 if (cmd
.opcode
== MMC_SEND_TUNING_BLOCK_HS200
) {
1970 if (mmc
->ios
.bus_width
== MMC_BUS_WIDTH_8
)
1971 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 128),
1973 else if (mmc
->ios
.bus_width
== MMC_BUS_WIDTH_4
)
1974 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 64),
1977 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(7, 64),
1982 * The tuning block is sent by the card to the host controller.
1983 * So we set the TRNS_READ bit in the Transfer Mode register.
1984 * This also takes care of setting DMA Enable and Multi Block
1985 * Select in the same register to 0.
1987 sdhci_writew(host
, SDHCI_TRNS_READ
, SDHCI_TRANSFER_MODE
);
1989 sdhci_send_command(host
, &cmd
);
1994 spin_unlock_irqrestore(&host
->lock
, flags
);
1995 /* Wait for Buffer Read Ready interrupt */
1996 wait_event_interruptible_timeout(host
->buf_ready_int
,
1997 (host
->tuning_done
== 1),
1998 msecs_to_jiffies(50));
1999 spin_lock_irqsave(&host
->lock
, flags
);
2001 if (!host
->tuning_done
) {
2002 pr_info(DRIVER_NAME
": Timeout waiting for "
2003 "Buffer Read Ready interrupt during tuning "
2004 "procedure, falling back to fixed sampling "
2006 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2007 ctrl
&= ~SDHCI_CTRL_TUNED_CLK
;
2008 ctrl
&= ~SDHCI_CTRL_EXEC_TUNING
;
2009 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2015 host
->tuning_done
= 0;
2017 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2019 /* eMMC spec does not require a delay between tuning cycles */
2020 if (opcode
== MMC_SEND_TUNING_BLOCK
)
2022 } while (ctrl
& SDHCI_CTRL_EXEC_TUNING
);
2025 * The Host Driver has exhausted the maximum number of loops allowed,
2026 * so use fixed sampling frequency.
2028 if (tuning_loop_counter
< 0) {
2029 ctrl
&= ~SDHCI_CTRL_TUNED_CLK
;
2030 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2032 if (!(ctrl
& SDHCI_CTRL_TUNED_CLK
)) {
2033 pr_info(DRIVER_NAME
": Tuning procedure"
2034 " failed, falling back to fixed sampling"
2041 * If this is the very first time we are here, we start the retuning
2042 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
2043 * flag won't be set, we check this condition before actually starting
2046 if (!(host
->flags
& SDHCI_NEEDS_RETUNING
) && host
->tuning_count
&&
2047 (host
->tuning_mode
== SDHCI_TUNING_MODE_1
)) {
2048 host
->flags
|= SDHCI_USING_RETUNING_TIMER
;
2049 mod_timer(&host
->tuning_timer
, jiffies
+
2050 host
->tuning_count
* HZ
);
2051 /* Tuning mode 1 limits the maximum data length to 4MB */
2052 mmc
->max_blk_count
= (4 * 1024 * 1024) / mmc
->max_blk_size
;
2053 } else if (host
->flags
& SDHCI_USING_RETUNING_TIMER
) {
2054 host
->flags
&= ~SDHCI_NEEDS_RETUNING
;
2055 /* Reload the new initial value for timer */
2056 mod_timer(&host
->tuning_timer
, jiffies
+
2057 host
->tuning_count
* HZ
);
2061 * In case tuning fails, host controllers which support re-tuning can
2062 * try tuning again at a later time, when the re-tuning timer expires.
2063 * So for these controllers, we return 0. Since there might be other
2064 * controllers who do not have this capability, we return error for
2065 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
2066 * a retuning timer to do the retuning for the card.
2068 if (err
&& (host
->flags
& SDHCI_USING_RETUNING_TIMER
))
2071 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
2072 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
2073 spin_unlock_irqrestore(&host
->lock
, flags
);
2074 sdhci_runtime_pm_put(host
);
2080 static void sdhci_enable_preset_value(struct sdhci_host
*host
, bool enable
)
2082 /* Host Controller v3.00 defines preset value registers */
2083 if (host
->version
< SDHCI_SPEC_300
)
2087 * We only enable or disable Preset Value if they are not already
2088 * enabled or disabled respectively. Otherwise, we bail out.
2090 if (host
->preset_enabled
!= enable
) {
2091 u16 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2094 ctrl
|= SDHCI_CTRL_PRESET_VAL_ENABLE
;
2096 ctrl
&= ~SDHCI_CTRL_PRESET_VAL_ENABLE
;
2098 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2101 host
->flags
|= SDHCI_PV_ENABLED
;
2103 host
->flags
&= ~SDHCI_PV_ENABLED
;
2105 host
->preset_enabled
= enable
;
2109 static void sdhci_card_event(struct mmc_host
*mmc
)
2111 struct sdhci_host
*host
= mmc_priv(mmc
);
2112 unsigned long flags
;
2114 /* First check if client has provided their own card event */
2115 if (host
->ops
->card_event
)
2116 host
->ops
->card_event(host
);
2118 spin_lock_irqsave(&host
->lock
, flags
);
2120 /* Check host->mrq first in case we are runtime suspended */
2121 if (host
->mrq
&& !sdhci_do_get_cd(host
)) {
2122 pr_err("%s: Card removed during transfer!\n",
2123 mmc_hostname(host
->mmc
));
2124 pr_err("%s: Resetting controller.\n",
2125 mmc_hostname(host
->mmc
));
2127 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
2128 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
2130 host
->mrq
->cmd
->error
= -ENOMEDIUM
;
2131 tasklet_schedule(&host
->finish_tasklet
);
2134 spin_unlock_irqrestore(&host
->lock
, flags
);
2137 static const struct mmc_host_ops sdhci_ops
= {
2138 .request
= sdhci_request
,
2139 .set_ios
= sdhci_set_ios
,
2140 .get_cd
= sdhci_get_cd
,
2141 .get_ro
= sdhci_get_ro
,
2142 .hw_reset
= sdhci_hw_reset
,
2143 .enable_sdio_irq
= sdhci_enable_sdio_irq
,
2144 .start_signal_voltage_switch
= sdhci_start_signal_voltage_switch
,
2145 .execute_tuning
= sdhci_execute_tuning
,
2146 .card_event
= sdhci_card_event
,
2147 .card_busy
= sdhci_card_busy
,
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
2156 static void sdhci_tasklet_finish(unsigned long param
)
2158 struct sdhci_host
*host
;
2159 unsigned long flags
;
2160 struct mmc_request
*mrq
;
2162 host
= (struct sdhci_host
*)param
;
2164 spin_lock_irqsave(&host
->lock
, flags
);
2167 * If this tasklet gets rescheduled while running, it will
2168 * be run again afterwards but without any active request.
2171 spin_unlock_irqrestore(&host
->lock
, flags
);
2175 del_timer(&host
->timer
);
2180 * The controller needs a reset of internal state machines
2181 * upon error conditions.
2183 if (!(host
->flags
& SDHCI_DEVICE_DEAD
) &&
2184 ((mrq
->cmd
&& mrq
->cmd
->error
) ||
2185 (mrq
->sbc
&& mrq
->sbc
->error
) ||
2186 (mrq
->data
&& ((mrq
->data
->error
&& !mrq
->data
->stop
) ||
2187 (mrq
->data
->stop
&& mrq
->data
->stop
->error
))) ||
2188 (host
->quirks
& SDHCI_QUIRK_RESET_AFTER_REQUEST
))) {
2190 /* Some controllers need this kick or reset won't work here */
2191 if (host
->quirks
& SDHCI_QUIRK_CLOCK_BEFORE_RESET
)
2192 /* This is to force an update */
2193 host
->ops
->set_clock(host
, host
->clock
);
2195 /* Spec says we should do both at the same time, but Ricoh
2196 controllers do not like that. */
2197 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
2198 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
2205 #ifndef SDHCI_USE_LEDS_CLASS
2206 sdhci_deactivate_led(host
);
2210 spin_unlock_irqrestore(&host
->lock
, flags
);
2212 mmc_request_done(host
->mmc
, mrq
);
2213 sdhci_runtime_pm_put(host
);
2216 static void sdhci_timeout_timer(unsigned long data
)
2218 struct sdhci_host
*host
;
2219 unsigned long flags
;
2221 host
= (struct sdhci_host
*)data
;
2223 spin_lock_irqsave(&host
->lock
, flags
);
2226 pr_err("%s: Timeout waiting for hardware "
2227 "interrupt.\n", mmc_hostname(host
->mmc
));
2228 sdhci_dumpregs(host
);
2231 host
->data
->error
= -ETIMEDOUT
;
2232 sdhci_finish_data(host
);
2235 host
->cmd
->error
= -ETIMEDOUT
;
2237 host
->mrq
->cmd
->error
= -ETIMEDOUT
;
2239 tasklet_schedule(&host
->finish_tasklet
);
2244 spin_unlock_irqrestore(&host
->lock
, flags
);
2247 static void sdhci_tuning_timer(unsigned long data
)
2249 struct sdhci_host
*host
;
2250 unsigned long flags
;
2252 host
= (struct sdhci_host
*)data
;
2254 spin_lock_irqsave(&host
->lock
, flags
);
2256 host
->flags
|= SDHCI_NEEDS_RETUNING
;
2258 spin_unlock_irqrestore(&host
->lock
, flags
);
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
2267 static void sdhci_cmd_irq(struct sdhci_host
*host
, u32 intmask
, u32
*mask
)
2269 BUG_ON(intmask
== 0);
2272 pr_err("%s: Got command interrupt 0x%08x even "
2273 "though no command operation was in progress.\n",
2274 mmc_hostname(host
->mmc
), (unsigned)intmask
);
2275 sdhci_dumpregs(host
);
2279 if (intmask
& SDHCI_INT_TIMEOUT
)
2280 host
->cmd
->error
= -ETIMEDOUT
;
2281 else if (intmask
& (SDHCI_INT_CRC
| SDHCI_INT_END_BIT
|
2283 host
->cmd
->error
= -EILSEQ
;
2285 if (host
->cmd
->error
) {
2286 tasklet_schedule(&host
->finish_tasklet
);
2291 * The host can send and interrupt when the busy state has
2292 * ended, allowing us to wait without wasting CPU cycles.
2293 * Unfortunately this is overloaded on the "data complete"
2294 * interrupt, so we need to take some care when handling
2297 * Note: The 1.0 specification is a bit ambiguous about this
2298 * feature so there might be some problems with older
2301 if (host
->cmd
->flags
& MMC_RSP_BUSY
) {
2302 if (host
->cmd
->data
)
2303 DBG("Cannot wait for busy signal when also "
2304 "doing a data transfer");
2305 else if (!(host
->quirks
& SDHCI_QUIRK_NO_BUSY_IRQ
)
2306 && !host
->busy_handle
) {
2307 /* Mark that command complete before busy is ended */
2308 host
->busy_handle
= 1;
2312 /* The controller does not support the end-of-busy IRQ,
2313 * fall through and take the SDHCI_INT_RESPONSE */
2314 } else if ((host
->quirks2
& SDHCI_QUIRK2_STOP_WITH_TC
) &&
2315 host
->cmd
->opcode
== MMC_STOP_TRANSMISSION
&& !host
->data
) {
2316 *mask
&= ~SDHCI_INT_DATA_END
;
2319 if (intmask
& SDHCI_INT_RESPONSE
)
2320 sdhci_finish_command(host
);
#ifdef CONFIG_MMC_DEBUG
/*
 * Debug helper: dump the registers and walk the ADMA descriptor table,
 * printing each entry (64-bit or 32-bit layout) until the END descriptor.
 */
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    name, desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
2356 static void sdhci_data_irq(struct sdhci_host
*host
, u32 intmask
)
2359 BUG_ON(intmask
== 0);
2361 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2362 if (intmask
& SDHCI_INT_DATA_AVAIL
) {
2363 command
= SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
));
2364 if (command
== MMC_SEND_TUNING_BLOCK
||
2365 command
== MMC_SEND_TUNING_BLOCK_HS200
) {
2366 host
->tuning_done
= 1;
2367 wake_up(&host
->buf_ready_int
);
2374 * The "data complete" interrupt is also used to
2375 * indicate that a busy state has ended. See comment
2376 * above in sdhci_cmd_irq().
2378 if (host
->cmd
&& (host
->cmd
->flags
& MMC_RSP_BUSY
)) {
2379 if (intmask
& SDHCI_INT_DATA_TIMEOUT
) {
2380 host
->cmd
->error
= -ETIMEDOUT
;
2381 tasklet_schedule(&host
->finish_tasklet
);
2384 if (intmask
& SDHCI_INT_DATA_END
) {
2386 * Some cards handle busy-end interrupt
2387 * before the command completed, so make
2388 * sure we do things in the proper order.
2390 if (host
->busy_handle
)
2391 sdhci_finish_command(host
);
2393 host
->busy_handle
= 1;
2398 pr_err("%s: Got data interrupt 0x%08x even "
2399 "though no data operation was in progress.\n",
2400 mmc_hostname(host
->mmc
), (unsigned)intmask
);
2401 sdhci_dumpregs(host
);
2406 if (intmask
& SDHCI_INT_DATA_TIMEOUT
)
2407 host
->data
->error
= -ETIMEDOUT
;
2408 else if (intmask
& SDHCI_INT_DATA_END_BIT
)
2409 host
->data
->error
= -EILSEQ
;
2410 else if ((intmask
& SDHCI_INT_DATA_CRC
) &&
2411 SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))
2413 host
->data
->error
= -EILSEQ
;
2414 else if (intmask
& SDHCI_INT_ADMA_ERROR
) {
2415 pr_err("%s: ADMA error\n", mmc_hostname(host
->mmc
));
2416 sdhci_adma_show_error(host
);
2417 host
->data
->error
= -EIO
;
2418 if (host
->ops
->adma_workaround
)
2419 host
->ops
->adma_workaround(host
, intmask
);
2422 if (host
->data
->error
)
2423 sdhci_finish_data(host
);
2425 if (intmask
& (SDHCI_INT_DATA_AVAIL
| SDHCI_INT_SPACE_AVAIL
))
2426 sdhci_transfer_pio(host
);
2429 * We currently don't do anything fancy with DMA
2430 * boundaries, but as we can't disable the feature
2431 * we need to at least restart the transfer.
2433 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2434 * should return a valid address to continue from, but as
2435 * some controllers are faulty, don't trust them.
2437 if (intmask
& SDHCI_INT_DMA_END
) {
2438 u32 dmastart
, dmanow
;
2439 dmastart
= sg_dma_address(host
->data
->sg
);
2440 dmanow
= dmastart
+ host
->data
->bytes_xfered
;
2442 * Force update to the next DMA block boundary.
2445 ~(SDHCI_DEFAULT_BOUNDARY_SIZE
- 1)) +
2446 SDHCI_DEFAULT_BOUNDARY_SIZE
;
2447 host
->data
->bytes_xfered
= dmanow
- dmastart
;
2448 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2450 mmc_hostname(host
->mmc
), dmastart
,
2451 host
->data
->bytes_xfered
, dmanow
);
2452 sdhci_writel(host
, dmanow
, SDHCI_DMA_ADDRESS
);
2455 if (intmask
& SDHCI_INT_DATA_END
) {
2458 * Data managed to finish before the
2459 * command completed. Make sure we do
2460 * things in the proper order.
2462 host
->data_early
= 1;
2464 sdhci_finish_data(host
);
2470 static irqreturn_t
sdhci_irq(int irq
, void *dev_id
)
2472 irqreturn_t result
= IRQ_NONE
;
2473 struct sdhci_host
*host
= dev_id
;
2474 u32 intmask
, mask
, unexpected
= 0;
2477 spin_lock(&host
->lock
);
2479 if (host
->runtime_suspended
&& !sdhci_sdio_irq_enabled(host
)) {
2480 spin_unlock(&host
->lock
);
2484 intmask
= sdhci_readl(host
, SDHCI_INT_STATUS
);
2485 if (!intmask
|| intmask
== 0xffffffff) {
2491 /* Clear selected interrupts. */
2492 mask
= intmask
& (SDHCI_INT_CMD_MASK
| SDHCI_INT_DATA_MASK
|
2493 SDHCI_INT_BUS_POWER
);
2494 sdhci_writel(host
, mask
, SDHCI_INT_STATUS
);
2496 DBG("*** %s got interrupt: 0x%08x\n",
2497 mmc_hostname(host
->mmc
), intmask
);
2499 if (intmask
& (SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
)) {
2500 u32 present
= sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
2504 * There is a observation on i.mx esdhc. INSERT
2505 * bit will be immediately set again when it gets
2506 * cleared, if a card is inserted. We have to mask
2507 * the irq to prevent interrupt storm which will
2508 * freeze the system. And the REMOVE gets the
2511 * More testing are needed here to ensure it works
2512 * for other platforms though.
2514 host
->ier
&= ~(SDHCI_INT_CARD_INSERT
|
2515 SDHCI_INT_CARD_REMOVE
);
2516 host
->ier
|= present
? SDHCI_INT_CARD_REMOVE
:
2517 SDHCI_INT_CARD_INSERT
;
2518 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
2519 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
2521 sdhci_writel(host
, intmask
& (SDHCI_INT_CARD_INSERT
|
2522 SDHCI_INT_CARD_REMOVE
), SDHCI_INT_STATUS
);
2524 host
->thread_isr
|= intmask
& (SDHCI_INT_CARD_INSERT
|
2525 SDHCI_INT_CARD_REMOVE
);
2526 result
= IRQ_WAKE_THREAD
;
2529 if (intmask
& SDHCI_INT_CMD_MASK
)
2530 sdhci_cmd_irq(host
, intmask
& SDHCI_INT_CMD_MASK
,
2533 if (intmask
& SDHCI_INT_DATA_MASK
)
2534 sdhci_data_irq(host
, intmask
& SDHCI_INT_DATA_MASK
);
2536 if (intmask
& SDHCI_INT_BUS_POWER
)
2537 pr_err("%s: Card is consuming too much power!\n",
2538 mmc_hostname(host
->mmc
));
2540 if (intmask
& SDHCI_INT_CARD_INT
) {
2541 sdhci_enable_sdio_irq_nolock(host
, false);
2542 host
->thread_isr
|= SDHCI_INT_CARD_INT
;
2543 result
= IRQ_WAKE_THREAD
;
2546 intmask
&= ~(SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
|
2547 SDHCI_INT_CMD_MASK
| SDHCI_INT_DATA_MASK
|
2548 SDHCI_INT_ERROR
| SDHCI_INT_BUS_POWER
|
2549 SDHCI_INT_CARD_INT
);
2552 unexpected
|= intmask
;
2553 sdhci_writel(host
, intmask
, SDHCI_INT_STATUS
);
2556 if (result
== IRQ_NONE
)
2557 result
= IRQ_HANDLED
;
2559 intmask
= sdhci_readl(host
, SDHCI_INT_STATUS
);
2560 } while (intmask
&& --max_loops
);
2562 spin_unlock(&host
->lock
);
2565 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2566 mmc_hostname(host
->mmc
), unexpected
);
2567 sdhci_dumpregs(host
);
2573 static irqreturn_t
sdhci_thread_irq(int irq
, void *dev_id
)
2575 struct sdhci_host
*host
= dev_id
;
2576 unsigned long flags
;
2579 spin_lock_irqsave(&host
->lock
, flags
);
2580 isr
= host
->thread_isr
;
2581 host
->thread_isr
= 0;
2582 spin_unlock_irqrestore(&host
->lock
, flags
);
2584 if (isr
& (SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
)) {
2585 sdhci_card_event(host
->mmc
);
2586 mmc_detect_change(host
->mmc
, msecs_to_jiffies(200));
2589 if (isr
& SDHCI_INT_CARD_INT
) {
2590 sdio_run_irqs(host
->mmc
);
2592 spin_lock_irqsave(&host
->lock
, flags
);
2593 if (host
->flags
& SDHCI_SDIO_IRQ_ENABLED
)
2594 sdhci_enable_sdio_irq_nolock(host
, true);
2595 spin_unlock_irqrestore(&host
->lock
, flags
);
2598 return isr
? IRQ_HANDLED
: IRQ_NONE
;
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/
2608 void sdhci_enable_irq_wakeups(struct sdhci_host
*host
)
2611 u8 mask
= SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
2612 | SDHCI_WAKE_ON_INT
;
2614 val
= sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
);
2616 /* Avoid fake wake up */
2617 if (host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
)
2618 val
&= ~(SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
);
2619 sdhci_writeb(host
, val
, SDHCI_WAKE_UP_CONTROL
);
2621 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups
);
2623 static void sdhci_disable_irq_wakeups(struct sdhci_host
*host
)
2626 u8 mask
= SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
2627 | SDHCI_WAKE_ON_INT
;
2629 val
= sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
);
2631 sdhci_writeb(host
, val
, SDHCI_WAKE_UP_CONTROL
);
2634 int sdhci_suspend_host(struct sdhci_host
*host
)
2636 sdhci_disable_card_detection(host
);
2638 /* Disable tuning since we are suspending */
2639 if (host
->flags
& SDHCI_USING_RETUNING_TIMER
) {
2640 del_timer_sync(&host
->tuning_timer
);
2641 host
->flags
&= ~SDHCI_NEEDS_RETUNING
;
2644 if (!device_may_wakeup(mmc_dev(host
->mmc
))) {
2646 sdhci_writel(host
, 0, SDHCI_INT_ENABLE
);
2647 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
2648 free_irq(host
->irq
, host
);
2650 sdhci_enable_irq_wakeups(host
);
2651 enable_irq_wake(host
->irq
);
2656 EXPORT_SYMBOL_GPL(sdhci_suspend_host
);
2658 int sdhci_resume_host(struct sdhci_host
*host
)
2662 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
2663 if (host
->ops
->enable_dma
)
2664 host
->ops
->enable_dma(host
);
2667 if (!device_may_wakeup(mmc_dev(host
->mmc
))) {
2668 ret
= request_threaded_irq(host
->irq
, sdhci_irq
,
2669 sdhci_thread_irq
, IRQF_SHARED
,
2670 mmc_hostname(host
->mmc
), host
);
2674 sdhci_disable_irq_wakeups(host
);
2675 disable_irq_wake(host
->irq
);
2678 if ((host
->mmc
->pm_flags
& MMC_PM_KEEP_POWER
) &&
2679 (host
->quirks2
& SDHCI_QUIRK2_HOST_OFF_CARD_ON
)) {
2680 /* Card keeps power but host controller does not */
2681 sdhci_init(host
, 0);
2684 sdhci_do_set_ios(host
, &host
->mmc
->ios
);
2686 sdhci_init(host
, (host
->mmc
->pm_flags
& MMC_PM_KEEP_POWER
));
2690 sdhci_enable_card_detection(host
);
2692 /* Set the re-tuning expiration flag */
2693 if (host
->flags
& SDHCI_USING_RETUNING_TIMER
)
2694 host
->flags
|= SDHCI_NEEDS_RETUNING
;
2699 EXPORT_SYMBOL_GPL(sdhci_resume_host
);
2701 static int sdhci_runtime_pm_get(struct sdhci_host
*host
)
2703 return pm_runtime_get_sync(host
->mmc
->parent
);
2706 static int sdhci_runtime_pm_put(struct sdhci_host
*host
)
2708 pm_runtime_mark_last_busy(host
->mmc
->parent
);
2709 return pm_runtime_put_autosuspend(host
->mmc
->parent
);
2712 static void sdhci_runtime_pm_bus_on(struct sdhci_host
*host
)
2714 if (host
->runtime_suspended
|| host
->bus_on
)
2716 host
->bus_on
= true;
2717 pm_runtime_get_noresume(host
->mmc
->parent
);
2720 static void sdhci_runtime_pm_bus_off(struct sdhci_host
*host
)
2722 if (host
->runtime_suspended
|| !host
->bus_on
)
2724 host
->bus_on
= false;
2725 pm_runtime_put_noidle(host
->mmc
->parent
);
2728 int sdhci_runtime_suspend_host(struct sdhci_host
*host
)
2730 unsigned long flags
;
2732 /* Disable tuning since we are suspending */
2733 if (host
->flags
& SDHCI_USING_RETUNING_TIMER
) {
2734 del_timer_sync(&host
->tuning_timer
);
2735 host
->flags
&= ~SDHCI_NEEDS_RETUNING
;
2738 spin_lock_irqsave(&host
->lock
, flags
);
2739 host
->ier
&= SDHCI_INT_CARD_INT
;
2740 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
2741 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
2742 spin_unlock_irqrestore(&host
->lock
, flags
);
2744 synchronize_hardirq(host
->irq
);
2746 spin_lock_irqsave(&host
->lock
, flags
);
2747 host
->runtime_suspended
= true;
2748 spin_unlock_irqrestore(&host
->lock
, flags
);
2752 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host
);
2754 int sdhci_runtime_resume_host(struct sdhci_host
*host
)
2756 unsigned long flags
;
2757 int host_flags
= host
->flags
;
2759 if (host_flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
2760 if (host
->ops
->enable_dma
)
2761 host
->ops
->enable_dma(host
);
2764 sdhci_init(host
, 0);
2766 /* Force clock and power re-program */
2769 sdhci_do_set_ios(host
, &host
->mmc
->ios
);
2771 sdhci_do_start_signal_voltage_switch(host
, &host
->mmc
->ios
);
2772 if ((host_flags
& SDHCI_PV_ENABLED
) &&
2773 !(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
)) {
2774 spin_lock_irqsave(&host
->lock
, flags
);
2775 sdhci_enable_preset_value(host
, true);
2776 spin_unlock_irqrestore(&host
->lock
, flags
);
2779 /* Set the re-tuning expiration flag */
2780 if (host
->flags
& SDHCI_USING_RETUNING_TIMER
)
2781 host
->flags
|= SDHCI_NEEDS_RETUNING
;
2783 spin_lock_irqsave(&host
->lock
, flags
);
2785 host
->runtime_suspended
= false;
2787 /* Enable SDIO IRQ */
2788 if (host
->flags
& SDHCI_SDIO_IRQ_ENABLED
)
2789 sdhci_enable_sdio_irq_nolock(host
, true);
2791 /* Enable Card Detection */
2792 sdhci_enable_card_detection(host
);
2794 spin_unlock_irqrestore(&host
->lock
, flags
);
2798 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host
);
2800 #endif /* CONFIG_PM */
2802 /*****************************************************************************\
2804 * Device allocation/registration *
2806 \*****************************************************************************/
2808 struct sdhci_host
*sdhci_alloc_host(struct device
*dev
,
2811 struct mmc_host
*mmc
;
2812 struct sdhci_host
*host
;
2814 WARN_ON(dev
== NULL
);
2816 mmc
= mmc_alloc_host(sizeof(struct sdhci_host
) + priv_size
, dev
);
2818 return ERR_PTR(-ENOMEM
);
2820 host
= mmc_priv(mmc
);
2826 EXPORT_SYMBOL_GPL(sdhci_alloc_host
);
2828 int sdhci_add_host(struct sdhci_host
*host
)
2830 struct mmc_host
*mmc
;
2831 u32 caps
[2] = {0, 0};
2832 u32 max_current_caps
;
2833 unsigned int ocr_avail
;
2834 unsigned int override_timeout_clk
;
2837 WARN_ON(host
== NULL
);
2844 host
->quirks
= debug_quirks
;
2846 host
->quirks2
= debug_quirks2
;
2848 override_timeout_clk
= host
->timeout_clk
;
2850 sdhci_do_reset(host
, SDHCI_RESET_ALL
);
2852 host
->version
= sdhci_readw(host
, SDHCI_HOST_VERSION
);
2853 host
->version
= (host
->version
& SDHCI_SPEC_VER_MASK
)
2854 >> SDHCI_SPEC_VER_SHIFT
;
2855 if (host
->version
> SDHCI_SPEC_300
) {
2856 pr_err("%s: Unknown controller version (%d). "
2857 "You may experience problems.\n", mmc_hostname(mmc
),
2861 caps
[0] = (host
->quirks
& SDHCI_QUIRK_MISSING_CAPS
) ? host
->caps
:
2862 sdhci_readl(host
, SDHCI_CAPABILITIES
);
2864 if (host
->version
>= SDHCI_SPEC_300
)
2865 caps
[1] = (host
->quirks
& SDHCI_QUIRK_MISSING_CAPS
) ?
2867 sdhci_readl(host
, SDHCI_CAPABILITIES_1
);
2869 if (host
->quirks
& SDHCI_QUIRK_FORCE_DMA
)
2870 host
->flags
|= SDHCI_USE_SDMA
;
2871 else if (!(caps
[0] & SDHCI_CAN_DO_SDMA
))
2872 DBG("Controller doesn't have SDMA capability\n");
2874 host
->flags
|= SDHCI_USE_SDMA
;
2876 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_DMA
) &&
2877 (host
->flags
& SDHCI_USE_SDMA
)) {
2878 DBG("Disabling DMA as it is marked broken\n");
2879 host
->flags
&= ~SDHCI_USE_SDMA
;
2882 if ((host
->version
>= SDHCI_SPEC_200
) &&
2883 (caps
[0] & SDHCI_CAN_DO_ADMA2
))
2884 host
->flags
|= SDHCI_USE_ADMA
;
2886 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_ADMA
) &&
2887 (host
->flags
& SDHCI_USE_ADMA
)) {
2888 DBG("Disabling ADMA as it is marked broken\n");
2889 host
->flags
&= ~SDHCI_USE_ADMA
;
2893 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
2894 * and *must* do 64-bit DMA. A driver has the opportunity to change
2895 * that during the first call to ->enable_dma(). Similarly
2896 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
2899 if (sdhci_readl(host
, SDHCI_CAPABILITIES
) & SDHCI_CAN_64BIT
)
2900 host
->flags
|= SDHCI_USE_64_BIT_DMA
;
2902 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
2903 if (host
->ops
->enable_dma
) {
2904 if (host
->ops
->enable_dma(host
)) {
2905 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
2908 ~(SDHCI_USE_SDMA
| SDHCI_USE_ADMA
);
2913 /* SDMA does not support 64-bit DMA */
2914 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
2915 host
->flags
&= ~SDHCI_USE_SDMA
;
2917 if (host
->flags
& SDHCI_USE_ADMA
) {
2919 * The DMA descriptor table size is calculated as the maximum
2920 * number of segments times 2, to allow for an alignment
2921 * descriptor for each segment, plus 1 for a nop end descriptor,
2922 * all multipled by the descriptor size.
2924 if (host
->flags
& SDHCI_USE_64_BIT_DMA
) {
2925 host
->adma_table_sz
= (SDHCI_MAX_SEGS
* 2 + 1) *
2926 SDHCI_ADMA2_64_DESC_SZ
;
2927 host
->align_buffer_sz
= SDHCI_MAX_SEGS
*
2928 SDHCI_ADMA2_64_ALIGN
;
2929 host
->desc_sz
= SDHCI_ADMA2_64_DESC_SZ
;
2930 host
->align_sz
= SDHCI_ADMA2_64_ALIGN
;
2931 host
->align_mask
= SDHCI_ADMA2_64_ALIGN
- 1;
2933 host
->adma_table_sz
= (SDHCI_MAX_SEGS
* 2 + 1) *
2934 SDHCI_ADMA2_32_DESC_SZ
;
2935 host
->align_buffer_sz
= SDHCI_MAX_SEGS
*
2936 SDHCI_ADMA2_32_ALIGN
;
2937 host
->desc_sz
= SDHCI_ADMA2_32_DESC_SZ
;
2938 host
->align_sz
= SDHCI_ADMA2_32_ALIGN
;
2939 host
->align_mask
= SDHCI_ADMA2_32_ALIGN
- 1;
2941 host
->adma_table
= dma_alloc_coherent(mmc_dev(mmc
),
2942 host
->adma_table_sz
,
2945 host
->align_buffer
= kmalloc(host
->align_buffer_sz
, GFP_KERNEL
);
2946 if (!host
->adma_table
|| !host
->align_buffer
) {
2947 dma_free_coherent(mmc_dev(mmc
), host
->adma_table_sz
,
2948 host
->adma_table
, host
->adma_addr
);
2949 kfree(host
->align_buffer
);
2950 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2952 host
->flags
&= ~SDHCI_USE_ADMA
;
2953 host
->adma_table
= NULL
;
2954 host
->align_buffer
= NULL
;
2955 } else if (host
->adma_addr
& host
->align_mask
) {
2956 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
2958 host
->flags
&= ~SDHCI_USE_ADMA
;
2959 dma_free_coherent(mmc_dev(mmc
), host
->adma_table_sz
,
2960 host
->adma_table
, host
->adma_addr
);
2961 kfree(host
->align_buffer
);
2962 host
->adma_table
= NULL
;
2963 host
->align_buffer
= NULL
;
2968 * If we use DMA, then it's up to the caller to set the DMA
2969 * mask, but PIO does not need the hw shim so we set a new
2970 * mask here in that case.
2972 if (!(host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
))) {
2973 host
->dma_mask
= DMA_BIT_MASK(64);
2974 mmc_dev(mmc
)->dma_mask
= &host
->dma_mask
;
2977 if (host
->version
>= SDHCI_SPEC_300
)
2978 host
->max_clk
= (caps
[0] & SDHCI_CLOCK_V3_BASE_MASK
)
2979 >> SDHCI_CLOCK_BASE_SHIFT
;
2981 host
->max_clk
= (caps
[0] & SDHCI_CLOCK_BASE_MASK
)
2982 >> SDHCI_CLOCK_BASE_SHIFT
;
2984 host
->max_clk
*= 1000000;
2985 if (host
->max_clk
== 0 || host
->quirks
&
2986 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN
) {
2987 if (!host
->ops
->get_max_clock
) {
2988 pr_err("%s: Hardware doesn't specify base clock "
2989 "frequency.\n", mmc_hostname(mmc
));
2992 host
->max_clk
= host
->ops
->get_max_clock(host
);
2996 * In case of Host Controller v3.00, find out whether clock
2997 * multiplier is supported.
2999 host
->clk_mul
= (caps
[1] & SDHCI_CLOCK_MUL_MASK
) >>
3000 SDHCI_CLOCK_MUL_SHIFT
;
3003 * In case the value in Clock Multiplier is 0, then programmable
3004 * clock mode is not supported, otherwise the actual clock
3005 * multiplier is one more than the value of Clock Multiplier
3006 * in the Capabilities Register.
3012 * Set host parameters.
3014 mmc
->ops
= &sdhci_ops
;
3015 mmc
->f_max
= host
->max_clk
;
3016 if (host
->ops
->get_min_clock
)
3017 mmc
->f_min
= host
->ops
->get_min_clock(host
);
3018 else if (host
->version
>= SDHCI_SPEC_300
) {
3019 if (host
->clk_mul
) {
3020 mmc
->f_min
= (host
->max_clk
* host
->clk_mul
) / 1024;
3021 mmc
->f_max
= host
->max_clk
* host
->clk_mul
;
3023 mmc
->f_min
= host
->max_clk
/ SDHCI_MAX_DIV_SPEC_300
;
3025 mmc
->f_min
= host
->max_clk
/ SDHCI_MAX_DIV_SPEC_200
;
3027 if (!(host
->quirks
& SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
)) {
3028 host
->timeout_clk
= (caps
[0] & SDHCI_TIMEOUT_CLK_MASK
) >>
3029 SDHCI_TIMEOUT_CLK_SHIFT
;
3030 if (host
->timeout_clk
== 0) {
3031 if (host
->ops
->get_timeout_clock
) {
3033 host
->ops
->get_timeout_clock(host
);
3035 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3041 if (caps
[0] & SDHCI_TIMEOUT_CLK_UNIT
)
3042 host
->timeout_clk
*= 1000;
3044 mmc
->max_busy_timeout
= host
->ops
->get_max_timeout_count
?
3045 host
->ops
->get_max_timeout_count(host
) : 1 << 27;
3046 mmc
->max_busy_timeout
/= host
->timeout_clk
;
3049 if (override_timeout_clk
)
3050 host
->timeout_clk
= override_timeout_clk
;
3052 mmc
->caps
|= MMC_CAP_SDIO_IRQ
| MMC_CAP_ERASE
| MMC_CAP_CMD23
;
3053 mmc
->caps2
|= MMC_CAP2_SDIO_IRQ_NOTHREAD
;
3055 if (host
->quirks
& SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12
)
3056 host
->flags
|= SDHCI_AUTO_CMD12
;
3058 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3059 if ((host
->version
>= SDHCI_SPEC_300
) &&
3060 ((host
->flags
& SDHCI_USE_ADMA
) ||
3061 !(host
->flags
& SDHCI_USE_SDMA
))) {
3062 host
->flags
|= SDHCI_AUTO_CMD23
;
3063 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc
));
3065 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc
));
3069 * A controller may support 8-bit width, but the board itself
3070 * might not have the pins brought out. Boards that support
3071 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3072 * their platform code before calling sdhci_add_host(), and we
3073 * won't assume 8-bit width for hosts without that CAP.
3075 if (!(host
->quirks
& SDHCI_QUIRK_FORCE_1_BIT_DATA
))
3076 mmc
->caps
|= MMC_CAP_4_BIT_DATA
;
3078 if (host
->quirks2
& SDHCI_QUIRK2_HOST_NO_CMD23
)
3079 mmc
->caps
&= ~MMC_CAP_CMD23
;
3081 if (caps
[0] & SDHCI_CAN_DO_HISPD
)
3082 mmc
->caps
|= MMC_CAP_SD_HIGHSPEED
| MMC_CAP_MMC_HIGHSPEED
;
3084 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
) &&
3085 !(mmc
->caps
& MMC_CAP_NONREMOVABLE
))
3086 mmc
->caps
|= MMC_CAP_NEEDS_POLL
;
3088 /* If there are external regulators, get them */
3089 if (mmc_regulator_get_supply(mmc
) == -EPROBE_DEFER
)
3090 return -EPROBE_DEFER
;
3092 /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3093 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
3094 ret
= regulator_enable(mmc
->supply
.vqmmc
);
3095 if (!regulator_is_supported_voltage(mmc
->supply
.vqmmc
, 1700000,
3097 caps
[1] &= ~(SDHCI_SUPPORT_SDR104
|
3098 SDHCI_SUPPORT_SDR50
|
3099 SDHCI_SUPPORT_DDR50
);
3101 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3102 mmc_hostname(mmc
), ret
);
3103 mmc
->supply
.vqmmc
= ERR_PTR(-EINVAL
);
3107 if (host
->quirks2
& SDHCI_QUIRK2_NO_1_8_V
)
3108 caps
[1] &= ~(SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
|
3109 SDHCI_SUPPORT_DDR50
);
3111 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3112 if (caps
[1] & (SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
|
3113 SDHCI_SUPPORT_DDR50
))
3114 mmc
->caps
|= MMC_CAP_UHS_SDR12
| MMC_CAP_UHS_SDR25
;
3116 /* SDR104 supports also implies SDR50 support */
3117 if (caps
[1] & SDHCI_SUPPORT_SDR104
) {
3118 mmc
->caps
|= MMC_CAP_UHS_SDR104
| MMC_CAP_UHS_SDR50
;
3119 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3120 * field can be promoted to support HS200.
3122 if (!(host
->quirks2
& SDHCI_QUIRK2_BROKEN_HS200
))
3123 mmc
->caps2
|= MMC_CAP2_HS200
;
3124 } else if (caps
[1] & SDHCI_SUPPORT_SDR50
)
3125 mmc
->caps
|= MMC_CAP_UHS_SDR50
;
3127 if (host
->quirks2
& SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400
&&
3128 (caps
[1] & SDHCI_SUPPORT_HS400
))
3129 mmc
->caps2
|= MMC_CAP2_HS400
;
3131 if ((mmc
->caps2
& MMC_CAP2_HSX00_1_2V
) &&
3132 (IS_ERR(mmc
->supply
.vqmmc
) ||
3133 !regulator_is_supported_voltage(mmc
->supply
.vqmmc
, 1100000,
3135 mmc
->caps2
&= ~MMC_CAP2_HSX00_1_2V
;
3137 if ((caps
[1] & SDHCI_SUPPORT_DDR50
) &&
3138 !(host
->quirks2
& SDHCI_QUIRK2_BROKEN_DDR50
))
3139 mmc
->caps
|= MMC_CAP_UHS_DDR50
;
3141 /* Does the host need tuning for SDR50? */
3142 if (caps
[1] & SDHCI_USE_SDR50_TUNING
)
3143 host
->flags
|= SDHCI_SDR50_NEEDS_TUNING
;
3145 /* Does the host need tuning for SDR104 / HS200? */
3146 if (mmc
->caps2
& MMC_CAP2_HS200
)
3147 host
->flags
|= SDHCI_SDR104_NEEDS_TUNING
;
3149 /* Driver Type(s) (A, C, D) supported by the host */
3150 if (caps
[1] & SDHCI_DRIVER_TYPE_A
)
3151 mmc
->caps
|= MMC_CAP_DRIVER_TYPE_A
;
3152 if (caps
[1] & SDHCI_DRIVER_TYPE_C
)
3153 mmc
->caps
|= MMC_CAP_DRIVER_TYPE_C
;
3154 if (caps
[1] & SDHCI_DRIVER_TYPE_D
)
3155 mmc
->caps
|= MMC_CAP_DRIVER_TYPE_D
;
3157 /* Initial value for re-tuning timer count */
3158 host
->tuning_count
= (caps
[1] & SDHCI_RETUNING_TIMER_COUNT_MASK
) >>
3159 SDHCI_RETUNING_TIMER_COUNT_SHIFT
;
3162 * In case Re-tuning Timer is not disabled, the actual value of
3163 * re-tuning timer will be 2 ^ (n - 1).
3165 if (host
->tuning_count
)
3166 host
->tuning_count
= 1 << (host
->tuning_count
- 1);
3168 /* Re-tuning mode supported by the Host Controller */
3169 host
->tuning_mode
= (caps
[1] & SDHCI_RETUNING_MODE_MASK
) >>
3170 SDHCI_RETUNING_MODE_SHIFT
;
3175 * According to SD Host Controller spec v3.00, if the Host System
3176 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3177 * the value is meaningful only if Voltage Support in the Capabilities
3178 * register is set. The actual current value is 4 times the register
3181 max_current_caps
= sdhci_readl(host
, SDHCI_MAX_CURRENT
);
3182 if (!max_current_caps
&& !IS_ERR(mmc
->supply
.vmmc
)) {
3183 int curr
= regulator_get_current_limit(mmc
->supply
.vmmc
);
3186 /* convert to SDHCI_MAX_CURRENT format */
3187 curr
= curr
/1000; /* convert to mA */
3188 curr
= curr
/SDHCI_MAX_CURRENT_MULTIPLIER
;
3190 curr
= min_t(u32
, curr
, SDHCI_MAX_CURRENT_LIMIT
);
3192 (curr
<< SDHCI_MAX_CURRENT_330_SHIFT
) |
3193 (curr
<< SDHCI_MAX_CURRENT_300_SHIFT
) |
3194 (curr
<< SDHCI_MAX_CURRENT_180_SHIFT
);
3198 if (caps
[0] & SDHCI_CAN_VDD_330
) {
3199 ocr_avail
|= MMC_VDD_32_33
| MMC_VDD_33_34
;
3201 mmc
->max_current_330
= ((max_current_caps
&
3202 SDHCI_MAX_CURRENT_330_MASK
) >>
3203 SDHCI_MAX_CURRENT_330_SHIFT
) *
3204 SDHCI_MAX_CURRENT_MULTIPLIER
;
3206 if (caps
[0] & SDHCI_CAN_VDD_300
) {
3207 ocr_avail
|= MMC_VDD_29_30
| MMC_VDD_30_31
;
3209 mmc
->max_current_300
= ((max_current_caps
&
3210 SDHCI_MAX_CURRENT_300_MASK
) >>
3211 SDHCI_MAX_CURRENT_300_SHIFT
) *
3212 SDHCI_MAX_CURRENT_MULTIPLIER
;
3214 if (caps
[0] & SDHCI_CAN_VDD_180
) {
3215 ocr_avail
|= MMC_VDD_165_195
;
3217 mmc
->max_current_180
= ((max_current_caps
&
3218 SDHCI_MAX_CURRENT_180_MASK
) >>
3219 SDHCI_MAX_CURRENT_180_SHIFT
) *
3220 SDHCI_MAX_CURRENT_MULTIPLIER
;
3223 /* If OCR set by external regulators, use it instead */
3225 ocr_avail
= mmc
->ocr_avail
;
3228 ocr_avail
&= host
->ocr_mask
;
3230 mmc
->ocr_avail
= ocr_avail
;
3231 mmc
->ocr_avail_sdio
= ocr_avail
;
3232 if (host
->ocr_avail_sdio
)
3233 mmc
->ocr_avail_sdio
&= host
->ocr_avail_sdio
;
3234 mmc
->ocr_avail_sd
= ocr_avail
;
3235 if (host
->ocr_avail_sd
)
3236 mmc
->ocr_avail_sd
&= host
->ocr_avail_sd
;
3237 else /* normal SD controllers don't support 1.8V */
3238 mmc
->ocr_avail_sd
&= ~MMC_VDD_165_195
;
3239 mmc
->ocr_avail_mmc
= ocr_avail
;
3240 if (host
->ocr_avail_mmc
)
3241 mmc
->ocr_avail_mmc
&= host
->ocr_avail_mmc
;
3243 if (mmc
->ocr_avail
== 0) {
3244 pr_err("%s: Hardware doesn't report any "
3245 "support voltages.\n", mmc_hostname(mmc
));
3249 spin_lock_init(&host
->lock
);
3252 * Maximum number of segments. Depends on if the hardware
3253 * can do scatter/gather or not.
3255 if (host
->flags
& SDHCI_USE_ADMA
)
3256 mmc
->max_segs
= SDHCI_MAX_SEGS
;
3257 else if (host
->flags
& SDHCI_USE_SDMA
)
3260 mmc
->max_segs
= SDHCI_MAX_SEGS
;
3263 * Maximum number of sectors in one transfer. Limited by DMA boundary
3266 mmc
->max_req_size
= 524288;
3269 * Maximum segment size. Could be one segment with the maximum number
3270 * of bytes. When doing hardware scatter/gather, each entry cannot
3271 * be larger than 64 KiB though.
3273 if (host
->flags
& SDHCI_USE_ADMA
) {
3274 if (host
->quirks
& SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
)
3275 mmc
->max_seg_size
= 65535;
3277 mmc
->max_seg_size
= 65536;
3279 mmc
->max_seg_size
= mmc
->max_req_size
;
3283 * Maximum block size. This varies from controller to controller and
3284 * is specified in the capabilities register.
3286 if (host
->quirks
& SDHCI_QUIRK_FORCE_BLK_SZ_2048
) {
3287 mmc
->max_blk_size
= 2;
3289 mmc
->max_blk_size
= (caps
[0] & SDHCI_MAX_BLOCK_MASK
) >>
3290 SDHCI_MAX_BLOCK_SHIFT
;
3291 if (mmc
->max_blk_size
>= 3) {
3292 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3294 mmc
->max_blk_size
= 0;
3298 mmc
->max_blk_size
= 512 << mmc
->max_blk_size
;
3301 * Maximum block count.
3303 mmc
->max_blk_count
= (host
->quirks
& SDHCI_QUIRK_NO_MULTIBLOCK
) ? 1 : 65535;
3308 tasklet_init(&host
->finish_tasklet
,
3309 sdhci_tasklet_finish
, (unsigned long)host
);
3311 setup_timer(&host
->timer
, sdhci_timeout_timer
, (unsigned long)host
);
3313 if (host
->version
>= SDHCI_SPEC_300
) {
3314 init_waitqueue_head(&host
->buf_ready_int
);
3316 /* Initialize re-tuning timer */
3317 init_timer(&host
->tuning_timer
);
3318 host
->tuning_timer
.data
= (unsigned long)host
;
3319 host
->tuning_timer
.function
= sdhci_tuning_timer
;
3322 sdhci_init(host
, 0);
3324 ret
= request_threaded_irq(host
->irq
, sdhci_irq
, sdhci_thread_irq
,
3325 IRQF_SHARED
, mmc_hostname(mmc
), host
);
3327 pr_err("%s: Failed to request IRQ %d: %d\n",
3328 mmc_hostname(mmc
), host
->irq
, ret
);
3332 #ifdef CONFIG_MMC_DEBUG
3333 sdhci_dumpregs(host
);
3336 #ifdef SDHCI_USE_LEDS_CLASS
3337 snprintf(host
->led_name
, sizeof(host
->led_name
),
3338 "%s::", mmc_hostname(mmc
));
3339 host
->led
.name
= host
->led_name
;
3340 host
->led
.brightness
= LED_OFF
;
3341 host
->led
.default_trigger
= mmc_hostname(mmc
);
3342 host
->led
.brightness_set
= sdhci_led_control
;
3344 ret
= led_classdev_register(mmc_dev(mmc
), &host
->led
);
3346 pr_err("%s: Failed to register LED device: %d\n",
3347 mmc_hostname(mmc
), ret
);
3356 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3357 mmc_hostname(mmc
), host
->hw_name
, dev_name(mmc_dev(mmc
)),
3358 (host
->flags
& SDHCI_USE_ADMA
) ?
3359 (host
->flags
& SDHCI_USE_64_BIT_DMA
) ? "ADMA 64-bit" : "ADMA" :
3360 (host
->flags
& SDHCI_USE_SDMA
) ? "DMA" : "PIO");
3362 sdhci_enable_card_detection(host
);
3366 #ifdef SDHCI_USE_LEDS_CLASS
3368 sdhci_do_reset(host
, SDHCI_RESET_ALL
);
3369 sdhci_writel(host
, 0, SDHCI_INT_ENABLE
);
3370 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
3371 free_irq(host
->irq
, host
);
3374 tasklet_kill(&host
->finish_tasklet
);
3379 EXPORT_SYMBOL_GPL(sdhci_add_host
);
3381 void sdhci_remove_host(struct sdhci_host
*host
, int dead
)
3383 struct mmc_host
*mmc
= host
->mmc
;
3384 unsigned long flags
;
3387 spin_lock_irqsave(&host
->lock
, flags
);
3389 host
->flags
|= SDHCI_DEVICE_DEAD
;
3392 pr_err("%s: Controller removed during "
3393 " transfer!\n", mmc_hostname(mmc
));
3395 host
->mrq
->cmd
->error
= -ENOMEDIUM
;
3396 tasklet_schedule(&host
->finish_tasklet
);
3399 spin_unlock_irqrestore(&host
->lock
, flags
);
3402 sdhci_disable_card_detection(host
);
3404 mmc_remove_host(mmc
);
3406 #ifdef SDHCI_USE_LEDS_CLASS
3407 led_classdev_unregister(&host
->led
);
3411 sdhci_do_reset(host
, SDHCI_RESET_ALL
);
3413 sdhci_writel(host
, 0, SDHCI_INT_ENABLE
);
3414 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
3415 free_irq(host
->irq
, host
);
3417 del_timer_sync(&host
->timer
);
3419 tasklet_kill(&host
->finish_tasklet
);
3421 if (!IS_ERR(mmc
->supply
.vqmmc
))
3422 regulator_disable(mmc
->supply
.vqmmc
);
3424 if (host
->adma_table
)
3425 dma_free_coherent(mmc_dev(mmc
), host
->adma_table_sz
,
3426 host
->adma_table
, host
->adma_addr
);
3427 kfree(host
->align_buffer
);
3429 host
->adma_table
= NULL
;
3430 host
->align_buffer
= NULL
;
3433 EXPORT_SYMBOL_GPL(sdhci_remove_host
);
3435 void sdhci_free_host(struct sdhci_host
*host
)
3437 mmc_free_host(host
->mmc
);
3440 EXPORT_SYMBOL_GPL(sdhci_free_host
);
3442 /*****************************************************************************\
3444 * Driver init/exit *
3446 \*****************************************************************************/
3448 static int __init
sdhci_drv_init(void)
3451 ": Secure Digital Host Controller Interface driver\n");
3452 pr_info(DRIVER_NAME
": Copyright(c) Pierre Ossman\n");
3457 static void __exit
sdhci_drv_exit(void)
3461 module_init(sdhci_drv_init
);
3462 module_exit(sdhci_drv_exit
);
3464 module_param(debug_quirks
, uint
, 0444);
3465 module_param(debug_quirks2
, uint
, 0444);
3467 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3468 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3469 MODULE_LICENSE("GPL");
3471 MODULE_PARM_DESC(debug_quirks
, "Force certain quirks.");
3472 MODULE_PARM_DESC(debug_quirks2
, "Force certain other quirks.");