/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
        pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
        pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40
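
/*
 * Usage sketch (illustrative, not part of the original file): with a
 * struct sdhci_host *host in scope, DBG("PIO reading\n") expands to
 * pr_debug("mmc0: sdhci: PIO reading\n") for a host named "mmc0", so
 * every message is automatically prefixed with the host's name.
 * SDHCI_DUMP() is identical but logs at error level for register dumps.
 */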

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
        SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

        SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
                   sdhci_readl(host, SDHCI_DMA_ADDRESS),
                   sdhci_readw(host, SDHCI_HOST_VERSION));
        SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
                   sdhci_readw(host, SDHCI_BLOCK_SIZE),
                   sdhci_readw(host, SDHCI_BLOCK_COUNT));
        SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
                   sdhci_readl(host, SDHCI_ARGUMENT),
                   sdhci_readw(host, SDHCI_TRANSFER_MODE));
        SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
                   sdhci_readl(host, SDHCI_PRESENT_STATE),
                   sdhci_readb(host, SDHCI_HOST_CONTROL));
        SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
                   sdhci_readb(host, SDHCI_POWER_CONTROL),
                   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
        SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
                   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
                   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
        SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
                   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
                   sdhci_readl(host, SDHCI_INT_STATUS));
        SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
                   sdhci_readl(host, SDHCI_INT_ENABLE),
                   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
        SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
                   sdhci_readw(host, SDHCI_ACMD12_ERR),
                   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
        SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
                   sdhci_readl(host, SDHCI_CAPABILITIES),
                   sdhci_readl(host, SDHCI_CAPABILITIES_1));
        SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
                   sdhci_readw(host, SDHCI_COMMAND),
                   sdhci_readl(host, SDHCI_MAX_CURRENT));
        SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE),
                   sdhci_readl(host, SDHCI_RESPONSE + 4));
        SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE + 8),
                   sdhci_readl(host, SDHCI_RESPONSE + 12));
        SDHCI_DUMP("Host ctl2: 0x%08x\n",
                   sdhci_readw(host, SDHCI_HOST_CONTROL2));

        if (host->flags & SDHCI_USE_ADMA) {
                if (host->flags & SDHCI_USE_64_BIT_DMA) {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                } else {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                }
        }

        SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                      *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
        return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
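
/*
 * Example (illustrative): a CMD12 (MMC_STOP_TRANSMISSION) with an R1b
 * response sets MMC_RSP_BUSY, so sdhci_data_line_cmd() treats it as a
 * data-line command even though it carries no data payload, because busy
 * signalling occupies DAT0.
 */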

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
        u32 present;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
            !mmc_card_is_removable(host->mmc))
                return;

        if (enable) {
                present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                                      SDHCI_CARD_PRESENT;

                host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                       SDHCI_INT_CARD_INSERT;
        } else {
                host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
        }

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
        if (host->bus_on)
                return;
        host->bus_on = true;
        pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
        if (!host->bus_on)
                return;
        host->bus_on = false;
        pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
        ktime_t timeout;

        sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

        if (mask & SDHCI_RESET_ALL) {
                host->clock = 0;
                /* Reset-all turns off SD Bus Power */
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        }

        /* Wait max 100 ms */
        timeout = ktime_add_ms(ktime_get(), 100);

        /* hw clears the bit when it's done */
        while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
                if (ktime_after(ktime_get(), timeout)) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                               mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
                        return;
                }
                udelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
        if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
                struct mmc_host *mmc = host->mmc;

                if (!mmc->ops->get_cd(mmc))
                        return;
        }

        host->ops->reset(host, mask);

        if (mask & SDHCI_RESET_ALL) {
                if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                        if (host->ops->enable_dma)
                                host->ops->enable_dma(host);
                }

                /* Resetting the controller clears many */
                host->preset_enabled = false;
        }
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
        host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
                    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
                    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
                    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
                    SDHCI_INT_RESPONSE;

        if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
            host->tuning_mode == SDHCI_TUNING_MODE_3)
                host->ier |= SDHCI_INT_RETUNE;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
        struct mmc_host *mmc = host->mmc;

        if (soft)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
        else
                sdhci_do_reset(host, SDHCI_RESET_ALL);

        sdhci_set_default_irqs(host);

        host->cqe_on = false;

        if (soft) {
                /* force clock reconfiguration */
                host->clock = 0;
                mmc->ops->set_ios(mmc, &mmc->ios);
        }
}

static void sdhci_reinit(struct sdhci_host *host)
{
        sdhci_init(host, 0);
        sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl |= SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
                              enum led_brightness brightness)
{
        struct sdhci_host *host = container_of(led, struct sdhci_host, led);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->runtime_suspended)
                goto out;

        if (brightness == LED_OFF)
                __sdhci_led_deactivate(host);
        else
                __sdhci_led_activate(host);
out:
        spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;

        snprintf(host->led_name, sizeof(host->led_name),
                 "%s::", mmc_hostname(mmc));

        host->led.name = host->led_name;
        host->led.brightness = LED_OFF;
        host->led.default_trigger = mmc_hostname(mmc);
        host->led.brightness_set = sdhci_led_control;

        return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
        led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
        return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
        __sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
        __sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 uninitialized_var(scratch);
        u8 *buf;

        DBG("PIO reading\n");

        blksize = host->data->blksz;
        chunk = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        if (chunk == 0) {
                                scratch = sdhci_readl(host, SDHCI_BUFFER);
                                chunk = 4;
                        }

                        *buf = scratch & 0xFF;

                        buf++;
                        scratch >>= 8;
                        chunk--;
                        len--;
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 scratch;
        u8 *buf;

        DBG("PIO writing\n");

        blksize = host->data->blksz;
        chunk = 0;
        scratch = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

                while (len) {
                        scratch |= (u32)*buf << (chunk * 8);

                        buf++;
                        chunk++;
                        len--;

                        if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
                                sdhci_writel(host, scratch, SDHCI_BUFFER);
                                chunk = 0;
                                scratch = 0;
                        }
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
        u32 mask;

        if (host->blocks == 0)
                return;

        if (host->data->flags & MMC_DATA_READ)
                mask = SDHCI_DATA_AVAILABLE;
        else
                mask = SDHCI_SPACE_AVAILABLE;

        /*
         * Some controllers (JMicron JMB38x) mess up the buffer bits
         * for transfers < 4 bytes. As long as it is just one block,
         * we can ignore the bits.
         */
        if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
            (host->data->blocks == 1))
                mask = ~0;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
                        udelay(100);

                if (host->data->flags & MMC_DATA_READ)
                        sdhci_read_block_pio(host);
                else
                        sdhci_write_block_pio(host);

                host->blocks--;
                if (host->blocks == 0)
                        break;
        }

        DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
                                  struct mmc_data *data, int cookie)
{
        int sg_count;

        /*
         * If the data buffers are already mapped, return the previous
         * dma_map_sg() result.
         */
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return data->sg_count;

        sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                              mmc_get_dma_dir(data));

        if (sg_count == 0)
                return -ENOSPC;

        data->sg_count = sg_count;
        data->host_cookie = cookie;

        return sg_count;
}
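
/*
 * Cookie life cycle (summary of the code above and the pre_req/post_req
 * callbacks further down): sdhci_pre_req() maps buffers ahead of time and
 * tags them COOKIE_PRE_MAPPED, so sdhci_prepare_data() can reuse the
 * mapping. Buffers mapped on demand get COOKIE_MAPPED and are unmapped in
 * sdhci_request_done(); pre-mapped ones are unmapped in sdhci_post_req(),
 * which resets the tag to COOKIE_UNMAPPED.
 */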

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
        kunmap_atomic(buffer);
        local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
                                  dma_addr_t addr, int len, unsigned cmd)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have these members in same position */
        dma_desc->cmd = cpu_to_le16(cmd);
        dma_desc->len = cpu_to_le16(len);
        dma_desc->addr_lo = cpu_to_le32((u32)addr);

        if (host->flags & SDHCI_USE_64_BIT_DMA)
                dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
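
/*
 * ADMA2 descriptor layout as written above (sketch, per the SDHCI
 * specification): a 16-bit attribute/command word (valid, end, int, act
 * bits), a 16-bit length, then the buffer address - 32 bits, extended by
 * a further 32-bit high word in 64-bit mode. Because the first three
 * fields sit at the same offsets in both variants, the code above can use
 * struct sdhci_adma2_64_desc for either descriptor size.
 */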

static void sdhci_adma_mark_end(void *desc)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have 'cmd' in same position */
        dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
        struct mmc_data *data, int sg_count)
{
        struct scatterlist *sg;
        unsigned long flags;
        dma_addr_t addr, align_addr;
        void *desc, *align;
        char *buffer;
        int len, offset, i;

        /*
         * The spec does not specify endianness of descriptor table.
         * We currently guess that it is LE.
         */

        host->sg_count = sg_count;

        desc = host->adma_table;
        align = host->align_buffer;

        align_addr = host->align_addr;

        for_each_sg(data->sg, sg, host->sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                /*
                 * The SDHCI specification states that ADMA addresses must
                 * be 32-bit aligned. If they aren't, then we use a bounce
                 * buffer for the (up to three) bytes that screw up the
                 * alignment.
                 */
                offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
                         SDHCI_ADMA2_MASK;
                if (offset) {
                        if (data->flags & MMC_DATA_WRITE) {
                                buffer = sdhci_kmap_atomic(sg, &flags);
                                memcpy(align, buffer, offset);
                                sdhci_kunmap_atomic(buffer, &flags);
                        }

                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, align_addr, offset,
                                              ADMA2_TRAN_VALID);

                        BUG_ON(offset > 65536);

                        align += SDHCI_ADMA2_ALIGN;
                        align_addr += SDHCI_ADMA2_ALIGN;

                        desc += host->desc_sz;

                        addr += offset;
                        len -= offset;
                }

                BUG_ON(len > 65536);

                if (len) {
                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, addr, len,
                                              ADMA2_TRAN_VALID);
                        desc += host->desc_sz;
                }

                /*
                 * If this triggers then we have a calculation bug
                 * in this function.
                 */
                WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
        }

        if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
                /* Mark the last descriptor as the terminating descriptor */
                if (desc != host->adma_table) {
                        desc -= host->desc_sz;
                        sdhci_adma_mark_end(desc);
                }
        } else {
                /* Add a terminating entry - nop, end, valid */
                sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
        }
}

static void sdhci_adma_table_post(struct sdhci_host *host,
        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i, size;
        void *align;
        char *buffer;
        unsigned long flags;

        if (data->flags & MMC_DATA_READ) {
                bool has_unaligned = false;

                /* Do a quick scan of the SG list for any unaligned mappings */
                for_each_sg(data->sg, sg, host->sg_count, i)
                        if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                has_unaligned = true;
                                break;
                        }

                if (has_unaligned) {
                        dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
                                            data->sg_len, DMA_FROM_DEVICE);

                        align = host->align_buffer;

                        for_each_sg(data->sg, sg, host->sg_count, i) {
                                if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                        size = SDHCI_ADMA2_ALIGN -
                                               (sg_dma_address(sg) &
                                                SDHCI_ADMA2_MASK);

                                        buffer = sdhci_kmap_atomic(sg, &flags);
                                        memcpy(buffer, align, size);
                                        sdhci_kunmap_atomic(buffer, &flags);

                                        align += SDHCI_ADMA2_ALIGN;
                                }
                        }
                }
        }
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;
        struct mmc_data *data = cmd->data;
        unsigned target_timeout, current_timeout;

        /*
         * If the host controller provides us with an incorrect timeout
         * value, just skip the check and use 0xE. The hardware may take
         * longer to time out, but that's much better than having a too-short
         * timeout value.
         */
        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
                return 0xE;

        /* Unspecified timeout, assume max */
        if (!data && !cmd->busy_timeout)
                return 0xE;

        /* timeout in us */
        if (!data)
                target_timeout = cmd->busy_timeout * 1000;
        else {
                target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
                if (host->clock && data->timeout_clks) {
                        unsigned long long val;

                        /*
                         * data->timeout_clks is in units of clock cycles.
                         * host->clock is in Hz. target_timeout is in us.
                         * Hence, us = 1000000 * cycles / Hz. Round up.
                         */
                        val = 1000000ULL * data->timeout_clks;
                        if (do_div(val, host->clock))
                                target_timeout++;
                        target_timeout += val;
                }
        }

        /*
         * Figure out needed cycles.
         * We do this in steps in order to fit inside a 32 bit int.
         * The first step is the minimum timeout, which will have a
         * minimum resolution of 6 bits:
         * (1) 2^13*1000 > 2^22,
         * (2) host->timeout_clk < 2^16
         *     =>
         *     (1) / (2) > 2^6
         */
        count = 0;
        current_timeout = (1 << 13) * 1000 / host->timeout_clk;
        while (current_timeout < target_timeout) {
                count++;
                current_timeout <<= 1;
                if (count >= 0xF)
                        break;
        }

        if (count >= 0xF) {
                DBG("Too large timeout 0x%x requested for CMD%d!\n",
                    count, cmd->opcode);
                count = 0xE;
        }

        return count;
}
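
/*
 * Worked example (illustrative numbers): with host->timeout_clk = 50000
 * (i.e. a 50 MHz timeout clock, in kHz), the base step above is
 * (1 << 13) * 1000 / 50000 = 163 us. A 500 ms target timeout then needs
 * the step doubled until 163 << count >= 500000, giving count = 12, which
 * is what ends up in SDHCI_TIMEOUT_CONTROL.
 */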

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
        u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
        u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

        if (host->flags & SDHCI_REQ_USE_DMA)
                host->ier = (host->ier & ~pio_irqs) | dma_irqs;
        else
                host->ier = (host->ier & ~dma_irqs) | pio_irqs;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;

        if (host->ops->set_timeout) {
                host->ops->set_timeout(host, cmd);
        } else {
                count = sdhci_calc_timeout(host, cmd);
                sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
        }
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 ctrl;
        struct mmc_data *data = cmd->data;

        if (sdhci_data_line_cmd(cmd))
                sdhci_set_timeout(host, cmd);

        if (!data)
                return;

        WARN_ON(host->data);

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
                unsigned int length_mask, offset_mask;
                int i;

                host->flags |= SDHCI_REQ_USE_DMA;

                /*
                 * FIXME: This doesn't account for merging when mapping the
                 * scatterlist.
                 *
                 * The assumption here being that alignment and lengths are
                 * the same after DMA mapping to device address space.
                 */
                length_mask = 0;
                offset_mask = 0;
                if (host->flags & SDHCI_USE_ADMA) {
                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
                                length_mask = 3;
                                /*
                                 * As we use up to 3 byte chunks to work
                                 * around alignment problems, we need to
                                 * check the offset as well.
                                 */
                                offset_mask = 3;
                        }
                } else {
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                                length_mask = 3;
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                                offset_mask = 3;
                }

                if (unlikely(length_mask | offset_mask)) {
                        for_each_sg(data->sg, sg, data->sg_len, i) {
                                if (sg->length & length_mask) {
                                        DBG("Reverting to PIO because of transfer size (%d)\n",
                                            sg->length);
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                                if (sg->offset & offset_mask) {
                                        DBG("Reverting to PIO because of bad alignment\n");
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                        }
                }
        }

        if (host->flags & SDHCI_REQ_USE_DMA) {
                int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

                if (sg_cnt <= 0) {
                        /*
                         * This only happens when someone fed
                         * us an invalid request.
                         */
                        WARN_ON(1);
                        host->flags &= ~SDHCI_REQ_USE_DMA;
                } else if (host->flags & SDHCI_USE_ADMA) {
                        sdhci_adma_table_pre(host, data, sg_cnt);

                        sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                sdhci_writel(host,
                                             (u64)host->adma_addr >> 32,
                                             SDHCI_ADMA_ADDRESS_HI);
                } else {
                        WARN_ON(sg_cnt != 1);
                        sdhci_writel(host, sg_dma_address(data->sg),
                                     SDHCI_DMA_ADDRESS);
                }
        }

        /*
         * Always adjust the DMA selection as some controllers
         * (e.g. JMicron) can't do PIO properly when the selection
         * is ADMA.
         */
        if (host->version >= SDHCI_SPEC_200) {
                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
                ctrl &= ~SDHCI_CTRL_DMA_MASK;
                if ((host->flags & SDHCI_REQ_USE_DMA) &&
                    (host->flags & SDHCI_USE_ADMA)) {
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                ctrl |= SDHCI_CTRL_ADMA64;
                        else
                                ctrl |= SDHCI_CTRL_ADMA32;
                } else {
                        ctrl |= SDHCI_CTRL_SDMA;
                }
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
        }

        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
                int flags;

                flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        sdhci_set_transfer_irqs(host);

        /* Set the DMA boundary value and block size */
        sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
                     SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
                                    struct mmc_request *mrq)
{
        return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
               !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
        struct mmc_command *cmd)
{
        u16 mode = 0;
        struct mmc_data *data = cmd->data;

        if (data == NULL) {
                if (host->quirks2 &
                        SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
                        sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
                } else {
                        /* clear Auto CMD settings for no data CMDs */
                        mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
                        sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                                SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
                }
                return;
        }

        WARN_ON(!host->data);

        if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
                mode = SDHCI_TRNS_BLK_CNT_EN;

        if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
                mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
                /*
                 * If we are sending CMD23, CMD12 never gets sent
                 * on successful completion (so no Auto-CMD12).
                 */
                if (sdhci_auto_cmd12(host, cmd->mrq) &&
                    (cmd->opcode != SD_IO_RW_EXTENDED))
                        mode |= SDHCI_TRNS_AUTO_CMD12;
                else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        mode |= SDHCI_TRNS_AUTO_CMD23;
                        sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
                }
        }

        if (data->flags & MMC_DATA_READ)
                mode |= SDHCI_TRNS_READ;
        if (host->flags & SDHCI_REQ_USE_DMA)
                mode |= SDHCI_TRNS_DMA;

        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
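
/*
 * Worked example (illustrative): a 4-block read issued under CMD23 with
 * SDHCI_AUTO_CMD23 enabled and DMA in use ends up with mode =
 * SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI | SDHCI_TRNS_AUTO_CMD23 |
 * SDHCI_TRNS_READ | SDHCI_TRNS_DMA, and the CMD23 argument from
 * cmd->mrq->sbc->arg written to SDHCI_ARGUMENT2.
 */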

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
        return (!(host->flags & SDHCI_DEVICE_DEAD) &&
                ((mrq->cmd && mrq->cmd->error) ||
                 (mrq->sbc && mrq->sbc->error) ||
                 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
                                (mrq->data->stop && mrq->data->stop->error))) ||
                 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        int i;

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (host->mrqs_done[i] == mrq) {
                        WARN_ON(1);
                        return;
                }
        }

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (!host->mrqs_done[i]) {
                        host->mrqs_done[i] = mrq;
                        break;
                }
        }

        WARN_ON(i >= SDHCI_MAX_MRQS);

        tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (host->cmd && host->cmd->mrq == mrq)
                host->cmd = NULL;

        if (host->data_cmd && host->data_cmd->mrq == mrq)
                host->data_cmd = NULL;

        if (host->data && host->data->mrq == mrq)
                host->data = NULL;

        if (sdhci_needs_reset(host, mrq))
                host->pending_reset = true;

        __sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
        struct mmc_command *data_cmd = host->data_cmd;
        struct mmc_data *data = host->data;

        host->data = NULL;
        host->data_cmd = NULL;

        if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
            (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
                sdhci_adma_table_post(host, data);

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !data->mrq->sbc)) {

                /*
                 * The controller needs a reset of internal state machines
                 * upon error conditions.
                 */
                if (data->error) {
                        if (!host->cmd || host->cmd == data_cmd)
                                sdhci_do_reset(host, SDHCI_RESET_CMD);
                        sdhci_do_reset(host, SDHCI_RESET_DATA);
                }

                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
                 * responsibility to send the stop command if required.
                 */
                if (data->mrq->cap_cmd_during_tfr) {
                        sdhci_finish_mrq(host, data->mrq);
                } else {
                        /* Avoid triggering warning in sdhci_send_command() */
                        host->cmd = NULL;
                        sdhci_send_command(host, data->stop);
                }
        } else {
                sdhci_finish_mrq(host, data->mrq);
        }
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
                            unsigned long timeout)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                mod_timer(&host->data_timer, timeout);
        else
                mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                del_timer(&host->data_timer);
        else
                del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
        int flags;
        u32 mask;
        unsigned long timeout;

        WARN_ON(host->cmd);

        /* Initially, a command has no error */
        cmd->error = 0;

        if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
            cmd->opcode == MMC_STOP_TRANSMISSION)
                cmd->flags |= MMC_RSP_BUSY;

        /* Wait max 10 ms */
        timeout = 10;

        mask = SDHCI_CMD_INHIBIT;
        if (sdhci_data_line_cmd(cmd))
                mask |= SDHCI_DATA_INHIBIT;

        /* We shouldn't wait for data inhibit for stop commands, even
           though they might use busy signaling */
        if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
                mask &= ~SDHCI_DATA_INHIBIT;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Controller never released inhibit bit(s).\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        cmd->error = -EIO;
                        sdhci_finish_mrq(host, cmd->mrq);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        timeout = jiffies;
        if (!cmd->data && cmd->busy_timeout > 9000)
                timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
        else
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);

        host->cmd = cmd;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
        }

        sdhci_prepare_data(host, cmd);

        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

        sdhci_set_transfer_mode(host, cmd);

        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
                pr_err("%s: Unsupported response type!\n",
                       mmc_hostname(host->mmc));
                cmd->error = -EINVAL;
                sdhci_finish_mrq(host, cmd->mrq);
                return;
        }

        if (!(cmd->flags & MMC_RSP_PRESENT))
                flags = SDHCI_CMD_RESP_NONE;
        else if (cmd->flags & MMC_RSP_136)
                flags = SDHCI_CMD_RESP_LONG;
        else if (cmd->flags & MMC_RSP_BUSY)
                flags = SDHCI_CMD_RESP_SHORT_BUSY;
        else
                flags = SDHCI_CMD_RESP_SHORT;

        if (cmd->flags & MMC_RSP_CRC)
                flags |= SDHCI_CMD_CRC;
        if (cmd->flags & MMC_RSP_OPCODE)
                flags |= SDHCI_CMD_INDEX;

        /* CMD19 is special in that the Data Present Select should be set */
        if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
            cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
                flags |= SDHCI_CMD_DATA;

        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
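
/*
 * Example (illustrative): for a CMD17 single-block read, the flags above
 * resolve to SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_INDEX |
 * SDHCI_CMD_DATA, and SDHCI_MAKE_CMD(17, flags) packs the opcode and flags
 * into the single 16-bit write to SDHCI_COMMAND that actually starts the
 * command.
 */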

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
        int i, reg;

        for (i = 0; i < 4; i++) {
                reg = SDHCI_RESPONSE + (3 - i) * 4;
                cmd->resp[i] = sdhci_readl(host, reg);
        }

        if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
                return;

        /* CRC is stripped so we need to do some shifting */
        for (i = 0; i < 4; i++) {
                cmd->resp[i] <<= 8;
                if (i != 3)
                        cmd->resp[i] |= cmd->resp[i + 1] >> 24;
        }
}

static void sdhci_finish_command(struct sdhci_host *host)
{
        struct mmc_command *cmd = host->cmd;

        host->cmd = NULL;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        sdhci_read_rsp_136(host, cmd);
                } else {
                        cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
                }
        }

        if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
                mmc_command_done(host->mmc, cmd->mrq);

        /*
         * The host can send an interrupt when the busy state has
         * ended, allowing us to wait without wasting CPU cycles.
         * The busy signal uses DAT0 so this is similar to waiting
         * for data to complete.
         *
         * Note: The 1.0 specification is a bit ambiguous about this
         *       feature so there might be some problems with older
         *       controllers.
         */
        if (cmd->flags & MMC_RSP_BUSY) {
                if (cmd->data) {
                        DBG("Cannot wait for busy signal when also doing a data transfer");
                } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
                           cmd == host->data_cmd) {
                        /* Command complete before busy is ended */
                        return;
                }
        }

        /* Finished CMD23, now send actual command. */
        if (cmd == cmd->mrq->sbc) {
                sdhci_send_command(host, cmd->mrq->cmd);
        } else {

                /* Processed actual command. */
                if (host->data && host->data_early)
                        sdhci_finish_data(host);

                if (!cmd->data)
                        sdhci_finish_mrq(host, cmd->mrq);
        }
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
        u16 preset = 0;

        switch (host->timing) {
        case MMC_TIMING_UHS_SDR12:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        case MMC_TIMING_UHS_SDR25:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
                break;
        case MMC_TIMING_UHS_SDR50:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
                break;
        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_MMC_HS200:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
                break;
        case MMC_TIMING_MMC_HS400:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
                break;
        default:
                pr_warn("%s: Invalid UHS-I mode selected\n",
                        mmc_hostname(host->mmc));
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        }
        return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
                   unsigned int *actual_clock)
{
        int div = 0; /* Initialized for compiler warning */
        int real_div = div, clk_mul = 1;
        u16 clk = 0;
        bool switch_base_clk = false;

        if (host->version >= SDHCI_SPEC_300) {
                if (host->preset_enabled) {
                        u16 pre_val;

                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        pre_val = sdhci_get_preset_value(host);
                        div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
                                >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
                        if (host->clk_mul &&
                                (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div + 1;
                                clk_mul = host->clk_mul;
                        } else {
                                real_div = max_t(int, 1, div << 1);
                        }
                        goto clock_set;
                }

                /*
                 * Check if the Host Controller supports Programmable Clock
                 * Mode.
                 */
                if (host->clk_mul) {
                        for (div = 1; div <= 1024; div++) {
                                if ((host->max_clk * host->clk_mul / div)
                                        <= clock)
                                        break;
                        }
                        if ((host->max_clk * host->clk_mul / div) <= clock) {
                                /*
                                 * Set Programmable Clock Mode in the Clock
                                 * Control register.
                                 */
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div;
                                clk_mul = host->clk_mul;
                                div--;
                        } else {
                                /*
                                 * Divisor can be too small to reach clock
                                 * speed requirement. Then use the base clock.
                                 */
                                switch_base_clk = true;
                        }
                }

                if (!host->clk_mul || switch_base_clk) {
                        /* Version 3.00 divisors must be a multiple of 2. */
                        if (host->max_clk <= clock)
                                div = 1;
                        else {
                                for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
                                     div += 2) {
                                        if ((host->max_clk / div) <= clock)
                                                break;
                                }
                        }
                        real_div = div;
                        div >>= 1;
                        if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                                && !div && host->max_clk <= 25000000)
                                div = 1;
                }
        } else {
                /* Version 2.00 divisors must be a power of 2. */
                for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
                        if ((host->max_clk / div) <= clock)
                                break;
                }
                real_div = div;
                div >>= 1;
        }

clock_set:
        if (real_div)
                *actual_clock = (host->max_clk * clk_mul) / real_div;
        clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
        clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
                << SDHCI_DIVIDER_HI_SHIFT;

        return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
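
/*
 * Worked example (illustrative numbers): on a v3.00 host with
 * host->max_clk = 200 MHz, no programmable clock multiplier, and a
 * requested clock of 50 MHz, the divisor loop stops at div = 4
 * (200 MHz / 4 <= 50 MHz), so real_div = 4, *actual_clock = 50 MHz, and
 * the register value is div >> 1 = 2, since the spec encodes base-clock
 * divisors as N where SDCLK = base / (2 * N).
 */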

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
        ktime_t timeout;

        clk |= SDHCI_CLOCK_INT_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
                & SDHCI_CLOCK_INT_STABLE)) {
                if (ktime_after(ktime_get(), timeout)) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        return;
                }
                udelay(10);
        }

        clk |= SDHCI_CLOCK_CARD_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
        u16 clk;

        host->mmc->actual_clock = 0;

        sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

        if (clock == 0)
                return;

        clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
        sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
                                unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;

        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

        if (mode != MMC_POWER_OFF)
                sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
        else
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
                           unsigned short vdd)
{
        u8 pwr = 0;

        if (mode != MMC_POWER_OFF) {
                switch (1 << vdd) {
                case MMC_VDD_165_195:
                        pwr = SDHCI_POWER_180;
                        break;
                case MMC_VDD_29_30:
                case MMC_VDD_30_31:
                        pwr = SDHCI_POWER_300;
                        break;
                case MMC_VDD_32_33:
                case MMC_VDD_33_34:
                        pwr = SDHCI_POWER_330;
                        break;
                default:
                        WARN(1, "%s: Invalid vdd %#x\n",
                             mmc_hostname(host->mmc), vdd);
                        break;
                }
        }

        if (host->pwr == pwr)
                return;

        host->pwr = pwr;

        if (pwr == 0) {
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        } else {
                /*
                 * Spec says that we should clear the power reg before setting
                 * a new value. Some controllers don't seem to like this though.
                 */
                if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
                        sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

                /*
                 * At least the Marvell CaFe chip gets confused if we set the
                 * voltage and set turn on power at the same time, so set the
                 * voltage first.
                 */
                if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
                        sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                pwr |= SDHCI_POWER_ON;

                sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_on(host);

                /*
                 * Some controllers need an extra 10 ms delay before they
                 * can apply clock after applying power.
                 */
                if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
                        mdelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                     unsigned short vdd)
{
        if (IS_ERR(host->mmc->supply.vmmc))
                sdhci_set_power_noreg(host, mode, vdd);
        else
                sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sdhci_host *host;
        int present;
        unsigned long flags;

        host = mmc_priv(mmc);

        /* Firstly check card presence */
        present = mmc->ops->get_cd(mmc);

        spin_lock_irqsave(&host->lock, flags);

        sdhci_led_activate(host);

        /*
         * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
         * requests if Auto-CMD12 is enabled.
         */
        if (sdhci_auto_cmd12(host, mrq)) {
                if (mrq->stop) {
                        mrq->data->stop = NULL;
                        mrq->stop = NULL;
                }
        }

        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
                mrq->cmd->error = -ENOMEDIUM;
                sdhci_finish_mrq(host, mrq);
        } else {
                if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
                        sdhci_send_command(host, mrq->sbc);
                else
                        sdhci_send_command(host, mrq->cmd);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        if (width == MMC_BUS_WIDTH_8) {
                ctrl &= ~SDHCI_CTRL_4BITBUS;
                ctrl |= SDHCI_CTRL_8BITBUS;
        } else {
                if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
                        ctrl &= ~SDHCI_CTRL_8BITBUS;
                if (width == MMC_BUS_WIDTH_4)
                        ctrl |= SDHCI_CTRL_4BITBUS;
                else
                        ctrl &= ~SDHCI_CTRL_4BITBUS;
        }
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
        u16 ctrl_2;

        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        /* Select Bus Speed Mode for host */
        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
        if ((timing == MMC_TIMING_MMC_HS200) ||
            (timing == MMC_TIMING_UHS_SDR104))
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
        else if (timing == MMC_TIMING_UHS_SDR12)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
        else if (timing == MMC_TIMING_UHS_SDR25)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
        else if (timing == MMC_TIMING_UHS_SDR50)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
        else if ((timing == MMC_TIMING_UHS_DDR50) ||
                 (timing == MMC_TIMING_MMC_DDR52))
                ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
        else if (timing == MMC_TIMING_MMC_HS400)
                ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        u8 ctrl;

        if (ios->power_mode == MMC_POWER_UNDEFINED)
                return;

        if (host->flags & SDHCI_DEVICE_DEAD) {
                if (!IS_ERR(mmc->supply.vmmc) &&
                    ios->power_mode == MMC_POWER_OFF)
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                return;
        }

        /*
         * Reset the chip on each power off.
         * Should clear out any weird states.
         */
        if (ios->power_mode == MMC_POWER_OFF) {
                sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
                sdhci_reinit(host);
        }

        if (host->version >= SDHCI_SPEC_300 &&
                (ios->power_mode == MMC_POWER_UP) &&
                !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
                sdhci_enable_preset_value(host, false);

        if (!ios->clock || ios->clock != host->clock) {
                host->ops->set_clock(host, ios->clock);
                host->clock = ios->clock;

                if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
                    host->clock) {
                        host->timeout_clk = host->mmc->actual_clock ?
                                                host->mmc->actual_clock / 1000 :
                                                host->clock / 1000;
                        host->mmc->max_busy_timeout =
                                host->ops->get_max_timeout_count ?
                                host->ops->get_max_timeout_count(host) :
                                1 << 27;
                        host->mmc->max_busy_timeout /= host->timeout_clk;
                }
        }

        if (host->ops->set_power)
                host->ops->set_power(host, ios->power_mode, ios->vdd);
        else
                sdhci_set_power(host, ios->power_mode, ios->vdd);

        if (host->ops->platform_send_init_74_clocks)
                host->ops->platform_send_init_74_clocks(host, ios->power_mode);

        host->ops->set_bus_width(host, ios->bus_width);

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

        if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
                if (ios->timing == MMC_TIMING_SD_HS ||
                    ios->timing == MMC_TIMING_MMC_HS ||
                    ios->timing == MMC_TIMING_MMC_HS400 ||
                    ios->timing == MMC_TIMING_MMC_HS200 ||
                    ios->timing == MMC_TIMING_MMC_DDR52 ||
                    ios->timing == MMC_TIMING_UHS_SDR50 ||
                    ios->timing == MMC_TIMING_UHS_SDR104 ||
                    ios->timing == MMC_TIMING_UHS_DDR50 ||
                    ios->timing == MMC_TIMING_UHS_SDR25)
                        ctrl |= SDHCI_CTRL_HISPD;
                else
                        ctrl &= ~SDHCI_CTRL_HISPD;
        }

        if (host->version >= SDHCI_SPEC_300) {
                u16 clk, ctrl_2;

                if (!host->preset_enabled) {
                        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
                        /*
                         * We only need to set Driver Strength if the
                         * preset value enable is not set.
                         */
                        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                        ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
                        if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
                        else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
                        else {
                                pr_warn("%s: invalid driver type, default to driver type B\n",
                                        mmc_hostname(mmc));
                                ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
                        }

                        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
                } else {
                        /*
                         * According to SDHC Spec v3.00, if the Preset Value
                         * Enable in the Host Control 2 register is set, we
                         * need to reset SD Clock Enable before changing High
                         * Speed Enable to avoid generating clock glitches.
                         */

                        /* Reset SD Clock Enable */
                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        clk &= ~SDHCI_CLOCK_CARD_EN;
                        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

                        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

                        /* Re-enable SD Clock */
                        host->ops->set_clock(host, host->clock);
                }

                /* Reset SD Clock Enable */
                clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                clk &= ~SDHCI_CLOCK_CARD_EN;
                sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

                host->ops->set_uhs_signaling(host, ios->timing);
                host->timing = ios->timing;

                if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
                                ((ios->timing == MMC_TIMING_UHS_SDR12) ||
                                 (ios->timing == MMC_TIMING_UHS_SDR25) ||
                                 (ios->timing == MMC_TIMING_UHS_SDR50) ||
                                 (ios->timing == MMC_TIMING_UHS_SDR104) ||
                                 (ios->timing == MMC_TIMING_UHS_DDR50) ||
                                 (ios->timing == MMC_TIMING_MMC_DDR52))) {
                        u16 preset;

                        sdhci_enable_preset_value(host, true);
                        preset = sdhci_get_preset_value(host);
                        ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
                                >> SDHCI_PRESET_DRV_SHIFT;
                }

                /* Re-enable SD Clock */
                host->ops->set_clock(host, host->clock);
        } else
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

        /*
         * Some (ENE) controllers go apeshit on some ios operation,
         * signalling timeout and CRC errors even on CMD0. Resetting
         * it on each ios seems to solve the problem.
         */
        if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int gpio_cd = mmc_gpio_get_cd(mmc);

        if (host->flags & SDHCI_DEVICE_DEAD)
                return 0;

        /* If nonremovable, assume that the card is always present. */
        if (!mmc_card_is_removable(host->mmc))
                return 1;

        /*
         * Try slot gpio detect; if defined it takes precedence
         * over built-in controller functionality.
         */
        if (gpio_cd >= 0)
                return !!gpio_cd;

        /* If polling, assume that the card is always present. */
        if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
                return 1;

        /* Host native card detect */
        return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
        unsigned long flags;
        int is_readonly;

        spin_lock_irqsave(&host->lock, flags);

        if (host->flags & SDHCI_DEVICE_DEAD)
                is_readonly = 0;
        else if (host->ops->get_ro)
                is_readonly = host->ops->get_ro(host);
        else
                is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
                                & SDHCI_WRITE_PROTECT);

        spin_unlock_irqrestore(&host->lock, flags);

        /* This quirk needs to be replaced by a callback-function later */
        return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
                !is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int i, ro_count;

        if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
                return sdhci_check_ro(host);

        ro_count = 0;
        for (i = 0; i < SAMPLE_COUNT; i++) {
                if (sdhci_check_ro(host)) {
                        if (++ro_count > SAMPLE_COUNT / 2)
                                return 1;
                } else
                        ro_count = 0;
                msleep(30);
        }
        return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);

        if (host->ops && host->ops->hw_reset)
                host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
        if (!(host->flags & SDHCI_DEVICE_DEAD)) {
                if (enable)
                        host->ier |= SDHCI_INT_CARD_INT;
                else
                        host->ier &= ~SDHCI_INT_CARD_INT;

                sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        }
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;

        if (enable)
                pm_runtime_get_noresume(host->mmc->parent);

        spin_lock_irqsave(&host->lock, flags);
        if (enable)
                host->flags |= SDHCI_SDIO_IRQ_ENABLED;
        else
                host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

        sdhci_enable_sdio_irq_nolock(host, enable);
        spin_unlock_irqrestore(&host->lock, flags);

        if (!enable)
                pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
                                      struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        u16 ctrl;
        int ret;

        /*
         * Signal Voltage Switching is only applicable for Host Controllers
         * v3.00 and above.
         */
        if (host->version < SDHCI_SPEC_300)
                return 0;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                if (!(host->flags & SDHCI_SIGNALING_330))
                        return -EINVAL;
                /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
                ctrl &= ~SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = mmc_regulator_set_vqmmc(mmc, ios);
                        if (ret) {
                                pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }
                /* Wait for 5ms */
                usleep_range(5000, 5500);

                /* 3.3V regulator output should be stable within 5 ms */
                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                if (!(ctrl & SDHCI_CTRL_VDD_180))
                        return 0;

                pr_warn("%s: 3.3V regulator output did not become stable\n",
                        mmc_hostname(mmc));

                return -EAGAIN;
        case MMC_SIGNAL_VOLTAGE_180:
                if (!(host->flags & SDHCI_SIGNALING_180))
                        return -EINVAL;
                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = mmc_regulator_set_vqmmc(mmc, ios);
                        if (ret) {
                                pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }

                /*
                 * Enable 1.8V Signal Enable in the Host Control2
                 * register.
                 */
                ctrl |= SDHCI_CTRL_VDD_180;
                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                /* Some controllers need to do more when switching */
                if (host->ops->voltage_switch)
                        host->ops->voltage_switch(host);

                /* 1.8V regulator output should be stable within 5 ms */
                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                if (ctrl & SDHCI_CTRL_VDD_180)
                        return 0;

                pr_warn("%s: 1.8V regulator output did not become stable\n",
                        mmc_hostname(mmc));

                return -EAGAIN;
        case MMC_SIGNAL_VOLTAGE_120:
                if (!(host->flags & SDHCI_SIGNALING_120))
                        return -EINVAL;
                if (!IS_ERR(mmc->supply.vqmmc)) {
                        ret = mmc_regulator_set_vqmmc(mmc, ios);
                        if (ret) {
                                pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
                                        mmc_hostname(mmc));
                                return -EIO;
                        }
                }
                return 0;
        default:
                /* No signal voltage switch required */
                return 0;
        }
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);

static int sdhci_card_busy(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        u32 present_state;

        /* Check whether DAT[0] is 0 */
        present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

        return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->flags |= SDHCI_HS400_TUNING;
        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}

static void sdhci_start_tuning(struct sdhci_host *host)
{
        u16 ctrl;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        ctrl |= SDHCI_CTRL_EXEC_TUNING;
        if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
                ctrl |= SDHCI_CTRL_TUNED_CLK;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

        /*
         * As per the Host Controller spec v3.00, tuning command
         * generates Buffer Read Ready interrupt, so enable that.
         *
         * Note: The spec clearly says that when tuning sequence
         * is being performed, the controller does not generate
         * interrupts other than Buffer Read Ready interrupt. But
         * to make sure we don't hit a controller bug, we _only_
         * enable Buffer Read Ready interrupt here.
         */
        sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
        sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_end_tuning(struct sdhci_host *host)
{
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_reset_tuning(struct sdhci_host *host)
{
        u16 ctrl;

        ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        ctrl &= ~SDHCI_CTRL_TUNED_CLK;
        ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
        sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}

static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
        sdhci_reset_tuning(host);

        sdhci_do_reset(host, SDHCI_RESET_CMD);
        sdhci_do_reset(host, SDHCI_RESET_DATA);

        sdhci_end_tuning(host);

        mmc_abort_tuning(host->mmc, opcode);
}

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
        struct mmc_host *mmc = host->mmc;
        struct mmc_command cmd = {};
        struct mmc_request mrq = {};
        unsigned long flags;
        u32 b = host->sdma_boundary;

        spin_lock_irqsave(&host->lock, flags);

        cmd.opcode = opcode;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
        cmd.mrq = &mrq;

        mrq.cmd = &cmd;

        /*
         * In response to CMD19, the card sends 64 bytes of tuning
         * block to the Host Controller. So we set the block size
         * to 64 here.
         */
        if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
            mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
        else
                sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

        /*
         * The tuning block is sent by the card to the host controller.
         * So we set the TRNS_READ bit in the Transfer Mode register.
         * This also takes care of setting DMA Enable and Multi Block
         * Select in the same register to 0.
         */
        sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

        sdhci_send_command(host, &cmd);

        host->cmd = NULL;

        sdhci_del_timer(host, &mrq);

        host->tuning_done = 0;

        spin_unlock_irqrestore(&host->lock, flags);

        /* Wait for Buffer Read Ready interrupt */
        wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
                           msecs_to_jiffies(50));
}

static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
        int i;

        /*
         * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
         * of loops reaches 40 times.
         */
        for (i = 0; i < MAX_TUNING_LOOP; i++) {
                u16 ctrl;

                sdhci_send_tuning(host, opcode);

                if (!host->tuning_done) {
                        pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
                                mmc_hostname(host->mmc));
                        sdhci_abort_tuning(host, opcode);
                        return;
                }

                ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
                if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
                        if (ctrl & SDHCI_CTRL_TUNED_CLK)
                                return; /* Success! */
                        break;
                }

                /* Spec does not require a delay between tuning cycles */
                if (host->tuning_delay > 0)
                        mdelay(host->tuning_delay);
        }

        pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
                mmc_hostname(host->mmc));
        sdhci_reset_tuning(host);
}

int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int err = 0;
        unsigned int tuning_count = 0;
        bool hs400_tuning;

        hs400_tuning = host->flags & SDHCI_HS400_TUNING;

        if (host->tuning_mode == SDHCI_TUNING_MODE_1)
                tuning_count = host->tuning_count;

        /*
         * The Host Controller needs tuning in case of SDR104 and DDR50
         * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
         * the Capabilities register.
         * If the Host Controller supports the HS200 mode then the
         * tuning function has to be executed.
         */
        switch (host->timing) {
        /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
                err = -EINVAL;
                goto out;

        case MMC_TIMING_MMC_HS200:
                /*
                 * Periodic re-tuning for HS400 is not expected to be needed, so
                 * disable it here.
                 */
                if (hs400_tuning)
                        tuning_count = 0;
                break;

        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_UHS_DDR50:
                break;

        case MMC_TIMING_UHS_SDR50:
                if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
                        break;
                /* FALLTHROUGH */

        default:
                goto out;
        }

        if (host->ops->platform_execute_tuning) {
                err = host->ops->platform_execute_tuning(host, opcode);
                goto out;
        }

        host->mmc->retune_period = tuning_count;

        if (host->tuning_delay < 0)
                host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

        sdhci_start_tuning(host);

        __sdhci_execute_tuning(host, opcode);

        sdhci_end_tuning(host);
out:
        host->flags &= ~SDHCI_HS400_TUNING;

        return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
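
/*
 * Flow summary (sketch of the functions above): sdhci_execute_tuning()
 * decides whether the current timing needs tuning at all, then
 * sdhci_start_tuning() sets SDHCI_CTRL_EXEC_TUNING,
 * __sdhci_execute_tuning() issues CMD19/CMD21 up to MAX_TUNING_LOOP (40)
 * times until the controller clears that bit, and sdhci_end_tuning()
 * restores the normal interrupt mask. host->mmc->retune_period arms
 * periodic re-tuning when the controller reports Tuning Mode 1.
 */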

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
        /* Host Controller v3.00 defines preset value registers */
        if (host->version < SDHCI_SPEC_300)
                return;

        /*
         * We only enable or disable Preset Value if they are not already
         * enabled or disabled respectively. Otherwise, we bail out.
         */
        if (host->preset_enabled != enable) {
                u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

                if (enable)
                        ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
                else
                        ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

                sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

                if (enable)
                        host->flags |= SDHCI_PV_ENABLED;
                else
                        host->flags &= ~SDHCI_PV_ENABLED;

                host->preset_enabled = enable;
        }
}

static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
                           int err)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (data->host_cookie != COOKIE_UNMAPPED)
                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                             mmc_get_dma_dir(data));

        data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sdhci_host *host = mmc_priv(mmc);

        mrq->data->host_cookie = COOKIE_UNMAPPED;

        if (host->flags & SDHCI_REQ_USE_DMA)
                sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
        return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
        if (host->data_cmd) {
                host->data_cmd->error = err;
                sdhci_finish_mrq(host, host->data_cmd->mrq);
        }

        if (host->cmd) {
                host->cmd->error = err;
                sdhci_finish_mrq(host, host->cmd->mrq);
        }
}

static void sdhci_card_event(struct mmc_host *mmc)
{
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
        int present;

        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);

        present = mmc->ops->get_cd(mmc);

        spin_lock_irqsave(&host->lock, flags);

        /* Check sdhci_has_requests() first in case we are runtime suspended */
        if (sdhci_has_requests(host) && !present) {
                pr_err("%s: Card removed during transfer!\n",
                       mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
                       mmc_hostname(host->mmc));

                sdhci_do_reset(host, SDHCI_RESET_CMD);
                sdhci_do_reset(host, SDHCI_RESET_DATA);

                sdhci_error_out_mrqs(host, -ENOMEDIUM);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
        .request	= sdhci_request,
        .post_req	= sdhci_post_req,
        .pre_req	= sdhci_pre_req,
        .set_ios	= sdhci_set_ios,
        .get_cd		= sdhci_get_cd,
        .get_ro		= sdhci_get_ro,
        .hw_reset	= sdhci_hw_reset,
        .enable_sdio_irq = sdhci_enable_sdio_irq,
        .start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
        .prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
        .execute_tuning			= sdhci_execute_tuning,
        .card_event			= sdhci_card_event,
        .card_busy	= sdhci_card_busy,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/


static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     mmc_get_dma_dir(data));
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
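
/*
 * The finish tasklet simply drains mrqs_done: sdhci_request_done() completes
 * one request per call and returns true once nothing is left, or once
 * completion must wait for a pending reset, so the loop below terminates.
 */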
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 *                           Interrupt handling                              *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
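
/*
 * Data interrupt handler.  Note the tuning special case up front: the
 * tuning commands (CMD19/CMD21) complete with a Buffer Read Ready interrupt
 * only, so they are recognised and signalled to the tuning waiter before
 * any of the normal data-path handling runs.
 */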
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    dmastart, host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
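
/*
 * Top-half interrupt handler.  It runs with host->lock held, claims and
 * dispatches the status bits, and re-reads SDHCI_INT_STATUS in a bounded
 * loop so back-to-back interrupts are handled without re-entering.  Work
 * that needs sleeping context (card detect, SDIO handling) is accumulated
 * in host->thread_isr and deferred to sdhci_thread_irq() by returning
 * IRQ_WAKE_THREAD.
 */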
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		DBG("IRQ status 0x%08x\n", intmask);

		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. The INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system. And the REMOVE bit gets the
			 * same treatment.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
cont:
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
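
/*
 * Threaded half of the interrupt handler: runs card-detect notification and
 * SDIO IRQ processing in sleeping context, then re-arms the SDIO interrupt
 * if it is still wanted.
 */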
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}

/*****************************************************************************\
 *                                                                           *
 *                           Suspend/resume                                  *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);

static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}

int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
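
/*
 * System resume: re-enable DMA, reinitialize the controller, and undo
 * whichever of the two suspend paths above was taken - either re-request
 * the interrupt or disable the wakeup events again.
 */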
int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);

int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
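
/*
 * Runtime resume assumes the controller may have lost its register state:
 * it reinitializes the host and, if power was on, zeroes the cached pwr and
 * clock values so that set_ios() really reprograms the hardware instead of
 * short-circuiting on the cached state.
 */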
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
	    mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 *                    Command Queue Engine (CQE) helpers                     *
 *                                                                           *
\*****************************************************************************/
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);

void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);

bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);

/*****************************************************************************\
 *                                                                           *
 *                      Device allocation/registration                       *
 *                                                                           *
\*****************************************************************************/

struct sdhci_host *sdhci_alloc_host(struct device *dev,
				    size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
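
/*
 * A platform glue driver typically pairs sdhci_alloc_host() with
 * sdhci_add_host(), along the lines of the following sketch (illustrative
 * only - names and error handling are the glue driver's business):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->ioaddr = base;
 *	host->irq = irq;
 *	ret = sdhci_add_host(host);
 *
 * with sdhci_remove_host() and sdhci_free_host() on the way out.
 */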

static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}

void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
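
/*
 * sdhci_setup_host() does all the probing that does not yet publish the
 * host to the MMC core: regulators are acquired first (so a missing
 * regulator can defer probing before the controller is reset), then the
 * capabilities are read and translated into mmc_host parameters.
 */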
int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * found.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;

	DBG("Version:   0x%08x | Present:  0x%08x\n",
	    sdhci_readw(host, SDHCI_HOST_VERSION),
	    sdhci_readl(host, SDHCI_PRESENT_STATE));
	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
	    sdhci_readl(host, SDHCI_CAPABILITIES),
	    sdhci_readl(host, SDHCI_CAPABILITIES_1));

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;
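
	/*
	 * Sizing note for the ADMA allocation below: with the constants in
	 * sdhci.h at their usual values (SDHCI_MAX_SEGS == 128, 12-byte
	 * 64-bit descriptors, 8-byte 32-bit descriptors), the descriptor
	 * table works out to (128 * 2 + 1) * 12 = 3084 bytes for 64-bit DMA
	 * or (128 * 2 + 1) * 8 = 2056 bytes for 32-bit DMA, plus a small
	 * bounce buffer for unaligned segment edges.
	 */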
	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
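
	/*
	 * Worked example (assuming the spec constants, not anything read at
	 * run time): a v3.00 host with a 200 MHz base clock and no clock
	 * multiplier gets f_min = 200 MHz / SDHCI_MAX_DIV_SPEC_300 (2046),
	 * i.e. roughly 98 kHz, and f_max = 200 MHz.  If a clock multiplier
	 * of N is advertised, programmable clock mode scales both ends:
	 * f_min = (200 MHz * N) / 1024 and f_max = 200 MHz * N.
	 */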

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
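
	/*
	 * E.g. with a 50 MHz timeout clock (timeout_clk == 50000 kHz) and
	 * the default maximum count of 1 << 27 cycles, max_busy_timeout is
	 * 134217728 / 50000 = ~2684 ms.
	 */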

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	    !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
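
	/*
	 * E.g. a register field value of 50 encodes 50 * 4 = 200 mA (the
	 * multiplier is 4 mA per step), and a 504 mA regulator limit from
	 * the fallback above becomes 504 / 4 = 126 in register format.
	 */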
	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
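
	/*
	 * The capabilities field encodes max_blk_size as a shift count:
	 * 0 -> 512 << 0 = 512 bytes, 1 -> 1024, 2 -> 2048; the value 3 is
	 * reserved, hence the warning and the fallback to 512 above.
	 */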

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
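
/*
 * __sdhci_add_host() publishes an already set-up host to the MMC core.
 * Keeping it separate from sdhci_setup_host() lets a glue driver adjust
 * the derived caps in between; plain sdhci_add_host() below just chains
 * the two and cleans up on failure.
 */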
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		     sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 *                            Driver init/exit                               *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");