1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * Thanks to the following companies for their support:
9 * - JMicron (hardware and technical support)
12 #include <linux/bitfield.h>
13 #include <linux/delay.h>
14 #include <linux/dmaengine.h>
15 #include <linux/ktime.h>
16 #include <linux/highmem.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/sizes.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
26 #include <linux/bug.h>
27 #include <linux/leds.h>
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
37 #define DRIVER_NAME "sdhci"
39 #define DBG(f, x...) \
40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
42 #define SDHCI_DUMP(f, x...) \
43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45 #define MAX_TUNING_LOOP 40
47 static unsigned int debug_quirks
= 0;
48 static unsigned int debug_quirks2
;
50 static bool sdhci_send_command(struct sdhci_host
*host
, struct mmc_command
*cmd
);
52 void sdhci_dumpregs(struct sdhci_host
*host
)
54 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
56 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
57 sdhci_readl(host
, SDHCI_DMA_ADDRESS
),
58 sdhci_readw(host
, SDHCI_HOST_VERSION
));
59 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
60 sdhci_readw(host
, SDHCI_BLOCK_SIZE
),
61 sdhci_readw(host
, SDHCI_BLOCK_COUNT
));
62 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
63 sdhci_readl(host
, SDHCI_ARGUMENT
),
64 sdhci_readw(host
, SDHCI_TRANSFER_MODE
));
65 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
66 sdhci_readl(host
, SDHCI_PRESENT_STATE
),
67 sdhci_readb(host
, SDHCI_HOST_CONTROL
));
68 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
69 sdhci_readb(host
, SDHCI_POWER_CONTROL
),
70 sdhci_readb(host
, SDHCI_BLOCK_GAP_CONTROL
));
71 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
72 sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
),
73 sdhci_readw(host
, SDHCI_CLOCK_CONTROL
));
74 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
75 sdhci_readb(host
, SDHCI_TIMEOUT_CONTROL
),
76 sdhci_readl(host
, SDHCI_INT_STATUS
));
77 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
78 sdhci_readl(host
, SDHCI_INT_ENABLE
),
79 sdhci_readl(host
, SDHCI_SIGNAL_ENABLE
));
80 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
81 sdhci_readw(host
, SDHCI_AUTO_CMD_STATUS
),
82 sdhci_readw(host
, SDHCI_SLOT_INT_STATUS
));
83 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
84 sdhci_readl(host
, SDHCI_CAPABILITIES
),
85 sdhci_readl(host
, SDHCI_CAPABILITIES_1
));
86 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
87 sdhci_readw(host
, SDHCI_COMMAND
),
88 sdhci_readl(host
, SDHCI_MAX_CURRENT
));
89 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
90 sdhci_readl(host
, SDHCI_RESPONSE
),
91 sdhci_readl(host
, SDHCI_RESPONSE
+ 4));
92 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
93 sdhci_readl(host
, SDHCI_RESPONSE
+ 8),
94 sdhci_readl(host
, SDHCI_RESPONSE
+ 12));
95 SDHCI_DUMP("Host ctl2: 0x%08x\n",
96 sdhci_readw(host
, SDHCI_HOST_CONTROL2
));
98 if (host
->flags
& SDHCI_USE_ADMA
) {
99 if (host
->flags
& SDHCI_USE_64_BIT_DMA
) {
100 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
101 sdhci_readl(host
, SDHCI_ADMA_ERROR
),
102 sdhci_readl(host
, SDHCI_ADMA_ADDRESS_HI
),
103 sdhci_readl(host
, SDHCI_ADMA_ADDRESS
));
105 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
106 sdhci_readl(host
, SDHCI_ADMA_ERROR
),
107 sdhci_readl(host
, SDHCI_ADMA_ADDRESS
));
111 if (host
->ops
->dump_uhs2_regs
)
112 host
->ops
->dump_uhs2_regs(host
);
114 if (host
->ops
->dump_vendor_regs
)
115 host
->ops
->dump_vendor_regs(host
);
117 SDHCI_DUMP("============================================\n");
119 EXPORT_SYMBOL_GPL(sdhci_dumpregs
);
121 /*****************************************************************************\
123 * Low level functions *
125 \*****************************************************************************/
127 static void sdhci_do_enable_v4_mode(struct sdhci_host
*host
)
131 ctrl2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
132 if (ctrl2
& SDHCI_CTRL_V4_MODE
)
135 ctrl2
|= SDHCI_CTRL_V4_MODE
;
136 sdhci_writew(host
, ctrl2
, SDHCI_HOST_CONTROL2
);
140 * This can be called before sdhci_add_host() by Vendor's host controller
141 * driver to enable v4 mode if supported.
143 void sdhci_enable_v4_mode(struct sdhci_host
*host
)
145 host
->v4_mode
= true;
146 sdhci_do_enable_v4_mode(host
);
148 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode
);
150 bool sdhci_data_line_cmd(struct mmc_command
*cmd
)
152 return cmd
->data
|| cmd
->flags
& MMC_RSP_BUSY
;
154 EXPORT_SYMBOL_GPL(sdhci_data_line_cmd
);
156 static void sdhci_set_card_detection(struct sdhci_host
*host
, bool enable
)
160 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
) ||
161 !mmc_card_is_removable(host
->mmc
) || mmc_can_gpio_cd(host
->mmc
))
165 present
= sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
168 host
->ier
|= present
? SDHCI_INT_CARD_REMOVE
:
169 SDHCI_INT_CARD_INSERT
;
171 host
->ier
&= ~(SDHCI_INT_CARD_REMOVE
| SDHCI_INT_CARD_INSERT
);
174 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
175 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
178 static void sdhci_enable_card_detection(struct sdhci_host
*host
)
180 sdhci_set_card_detection(host
, true);
183 static void sdhci_disable_card_detection(struct sdhci_host
*host
)
185 sdhci_set_card_detection(host
, false);
188 static void sdhci_runtime_pm_bus_on(struct sdhci_host
*host
)
193 pm_runtime_get_noresume(mmc_dev(host
->mmc
));
196 static void sdhci_runtime_pm_bus_off(struct sdhci_host
*host
)
200 host
->bus_on
= false;
201 pm_runtime_put_noidle(mmc_dev(host
->mmc
));
204 void sdhci_reset(struct sdhci_host
*host
, u8 mask
)
208 sdhci_writeb(host
, mask
, SDHCI_SOFTWARE_RESET
);
210 if (mask
& SDHCI_RESET_ALL
) {
212 /* Reset-all turns off SD Bus Power */
213 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
214 sdhci_runtime_pm_bus_off(host
);
217 /* Wait max 100 ms */
218 timeout
= ktime_add_ms(ktime_get(), 100);
220 /* hw clears the bit when it's done */
222 bool timedout
= ktime_after(ktime_get(), timeout
);
224 if (!(sdhci_readb(host
, SDHCI_SOFTWARE_RESET
) & mask
))
227 pr_err("%s: Reset 0x%x never completed.\n",
228 mmc_hostname(host
->mmc
), (int)mask
);
229 sdhci_err_stats_inc(host
, CTRL_TIMEOUT
);
230 sdhci_dumpregs(host
);
236 EXPORT_SYMBOL_GPL(sdhci_reset
);
238 bool sdhci_do_reset(struct sdhci_host
*host
, u8 mask
)
240 if (host
->quirks
& SDHCI_QUIRK_NO_CARD_NO_RESET
) {
241 struct mmc_host
*mmc
= host
->mmc
;
243 if (!mmc
->ops
->get_cd(mmc
))
247 host
->ops
->reset(host
, mask
);
251 EXPORT_SYMBOL_GPL(sdhci_do_reset
);
253 static void sdhci_reset_for_all(struct sdhci_host
*host
)
255 if (sdhci_do_reset(host
, SDHCI_RESET_ALL
)) {
256 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
257 if (host
->ops
->enable_dma
)
258 host
->ops
->enable_dma(host
);
260 /* Resetting the controller clears many */
261 host
->preset_enabled
= false;
265 enum sdhci_reset_reason
{
266 SDHCI_RESET_FOR_INIT
,
267 SDHCI_RESET_FOR_REQUEST_ERROR
,
268 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY
,
269 SDHCI_RESET_FOR_TUNING_ABORT
,
270 SDHCI_RESET_FOR_CARD_REMOVED
,
271 SDHCI_RESET_FOR_CQE_RECOVERY
,
274 static void sdhci_reset_for_reason(struct sdhci_host
*host
, enum sdhci_reset_reason reason
)
276 if (host
->quirks2
& SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER
) {
277 sdhci_do_reset(host
, SDHCI_RESET_CMD
| SDHCI_RESET_DATA
);
282 case SDHCI_RESET_FOR_INIT
:
283 sdhci_do_reset(host
, SDHCI_RESET_CMD
| SDHCI_RESET_DATA
);
285 case SDHCI_RESET_FOR_REQUEST_ERROR
:
286 case SDHCI_RESET_FOR_TUNING_ABORT
:
287 case SDHCI_RESET_FOR_CARD_REMOVED
:
288 case SDHCI_RESET_FOR_CQE_RECOVERY
:
289 sdhci_do_reset(host
, SDHCI_RESET_CMD
);
290 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
292 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY
:
293 sdhci_do_reset(host
, SDHCI_RESET_DATA
);
298 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
300 static void sdhci_set_default_irqs(struct sdhci_host
*host
)
302 host
->ier
= SDHCI_INT_BUS_POWER
| SDHCI_INT_DATA_END_BIT
|
303 SDHCI_INT_DATA_CRC
| SDHCI_INT_DATA_TIMEOUT
|
304 SDHCI_INT_INDEX
| SDHCI_INT_END_BIT
| SDHCI_INT_CRC
|
305 SDHCI_INT_TIMEOUT
| SDHCI_INT_DATA_END
|
308 if (host
->tuning_mode
== SDHCI_TUNING_MODE_2
||
309 host
->tuning_mode
== SDHCI_TUNING_MODE_3
)
310 host
->ier
|= SDHCI_INT_RETUNE
;
312 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
313 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
316 static void sdhci_config_dma(struct sdhci_host
*host
)
321 if (host
->version
< SDHCI_SPEC_200
)
324 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
327 * Always adjust the DMA selection as some controllers
328 * (e.g. JMicron) can't do PIO properly when the selection
331 ctrl
&= ~SDHCI_CTRL_DMA_MASK
;
332 if (!(host
->flags
& SDHCI_REQ_USE_DMA
))
335 /* Note if DMA Select is zero then SDMA is selected */
336 if (host
->flags
& SDHCI_USE_ADMA
)
337 ctrl
|= SDHCI_CTRL_ADMA32
;
339 if (host
->flags
& SDHCI_USE_64_BIT_DMA
) {
341 * If v4 mode, all supported DMA can be 64-bit addressing if
342 * controller supports 64-bit system address, otherwise only
343 * ADMA can support 64-bit addressing.
346 ctrl2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
347 ctrl2
|= SDHCI_CTRL_64BIT_ADDR
;
348 sdhci_writew(host
, ctrl2
, SDHCI_HOST_CONTROL2
);
349 } else if (host
->flags
& SDHCI_USE_ADMA
) {
351 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
352 * set SDHCI_CTRL_ADMA64.
354 ctrl
|= SDHCI_CTRL_ADMA64
;
359 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
362 static void sdhci_init(struct sdhci_host
*host
, int soft
)
364 struct mmc_host
*mmc
= host
->mmc
;
368 sdhci_reset_for(host
, INIT
);
370 sdhci_reset_for_all(host
);
373 sdhci_do_enable_v4_mode(host
);
375 spin_lock_irqsave(&host
->lock
, flags
);
376 sdhci_set_default_irqs(host
);
377 spin_unlock_irqrestore(&host
->lock
, flags
);
379 host
->cqe_on
= false;
382 /* force clock reconfiguration */
384 host
->reinit_uhs
= true;
385 mmc
->ops
->set_ios(mmc
, &mmc
->ios
);
389 static void sdhci_reinit(struct sdhci_host
*host
)
391 u32 cd
= host
->ier
& (SDHCI_INT_CARD_REMOVE
| SDHCI_INT_CARD_INSERT
);
394 sdhci_enable_card_detection(host
);
397 * A change to the card detect bits indicates a change in present state,
398 * refer sdhci_set_card_detection(). A card detect interrupt might have
399 * been missed while the host controller was being reset, so trigger a
402 if (cd
!= (host
->ier
& (SDHCI_INT_CARD_REMOVE
| SDHCI_INT_CARD_INSERT
)))
403 mmc_detect_change(host
->mmc
, msecs_to_jiffies(200));
406 static void __sdhci_led_activate(struct sdhci_host
*host
)
410 if (host
->quirks
& SDHCI_QUIRK_NO_LED
)
413 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
414 ctrl
|= SDHCI_CTRL_LED
;
415 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
418 static void __sdhci_led_deactivate(struct sdhci_host
*host
)
422 if (host
->quirks
& SDHCI_QUIRK_NO_LED
)
425 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
426 ctrl
&= ~SDHCI_CTRL_LED
;
427 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
430 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
431 static void sdhci_led_control(struct led_classdev
*led
,
432 enum led_brightness brightness
)
434 struct sdhci_host
*host
= container_of(led
, struct sdhci_host
, led
);
437 spin_lock_irqsave(&host
->lock
, flags
);
439 if (host
->runtime_suspended
)
442 if (brightness
== LED_OFF
)
443 __sdhci_led_deactivate(host
);
445 __sdhci_led_activate(host
);
447 spin_unlock_irqrestore(&host
->lock
, flags
);
450 static int sdhci_led_register(struct sdhci_host
*host
)
452 struct mmc_host
*mmc
= host
->mmc
;
454 if (host
->quirks
& SDHCI_QUIRK_NO_LED
)
457 snprintf(host
->led_name
, sizeof(host
->led_name
),
458 "%s::", mmc_hostname(mmc
));
460 host
->led
.name
= host
->led_name
;
461 host
->led
.brightness
= LED_OFF
;
462 host
->led
.default_trigger
= mmc_hostname(mmc
);
463 host
->led
.brightness_set
= sdhci_led_control
;
465 return led_classdev_register(mmc_dev(mmc
), &host
->led
);
468 static void sdhci_led_unregister(struct sdhci_host
*host
)
470 if (host
->quirks
& SDHCI_QUIRK_NO_LED
)
473 led_classdev_unregister(&host
->led
);
476 static inline void sdhci_led_activate(struct sdhci_host
*host
)
480 static inline void sdhci_led_deactivate(struct sdhci_host
*host
)
486 static inline int sdhci_led_register(struct sdhci_host
*host
)
491 static inline void sdhci_led_unregister(struct sdhci_host
*host
)
495 static inline void sdhci_led_activate(struct sdhci_host
*host
)
497 __sdhci_led_activate(host
);
500 static inline void sdhci_led_deactivate(struct sdhci_host
*host
)
502 __sdhci_led_deactivate(host
);
507 void sdhci_mod_timer(struct sdhci_host
*host
, struct mmc_request
*mrq
,
508 unsigned long timeout
)
510 if (sdhci_data_line_cmd(mrq
->cmd
))
511 mod_timer(&host
->data_timer
, timeout
);
513 mod_timer(&host
->timer
, timeout
);
515 EXPORT_SYMBOL_GPL(sdhci_mod_timer
);
517 static void sdhci_del_timer(struct sdhci_host
*host
, struct mmc_request
*mrq
)
519 if (sdhci_data_line_cmd(mrq
->cmd
))
520 del_timer(&host
->data_timer
);
522 del_timer(&host
->timer
);
525 static inline bool sdhci_has_requests(struct sdhci_host
*host
)
527 return host
->cmd
|| host
->data_cmd
;
530 /*****************************************************************************\
534 \*****************************************************************************/
536 static void sdhci_read_block_pio(struct sdhci_host
*host
)
538 size_t blksize
, len
, chunk
;
542 DBG("PIO reading\n");
544 blksize
= host
->data
->blksz
;
548 BUG_ON(!sg_miter_next(&host
->sg_miter
));
550 len
= min(host
->sg_miter
.length
, blksize
);
553 host
->sg_miter
.consumed
= len
;
555 buf
= host
->sg_miter
.addr
;
559 scratch
= sdhci_readl(host
, SDHCI_BUFFER
);
563 *buf
= scratch
& 0xFF;
572 sg_miter_stop(&host
->sg_miter
);
575 static void sdhci_write_block_pio(struct sdhci_host
*host
)
577 size_t blksize
, len
, chunk
;
581 DBG("PIO writing\n");
583 blksize
= host
->data
->blksz
;
588 BUG_ON(!sg_miter_next(&host
->sg_miter
));
590 len
= min(host
->sg_miter
.length
, blksize
);
593 host
->sg_miter
.consumed
= len
;
595 buf
= host
->sg_miter
.addr
;
598 scratch
|= (u32
)*buf
<< (chunk
* 8);
604 if ((chunk
== 4) || ((len
== 0) && (blksize
== 0))) {
605 sdhci_writel(host
, scratch
, SDHCI_BUFFER
);
612 sg_miter_stop(&host
->sg_miter
);
615 static void sdhci_transfer_pio(struct sdhci_host
*host
)
619 if (host
->blocks
== 0)
622 if (host
->data
->flags
& MMC_DATA_READ
)
623 mask
= SDHCI_DATA_AVAILABLE
;
625 mask
= SDHCI_SPACE_AVAILABLE
;
628 * Some controllers (JMicron JMB38x) mess up the buffer bits
629 * for transfers < 4 bytes. As long as it is just one block,
630 * we can ignore the bits.
632 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_SMALL_PIO
) &&
633 (host
->data
->blocks
== 1))
636 while (sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
) {
637 if (host
->quirks
& SDHCI_QUIRK_PIO_NEEDS_DELAY
)
640 if (host
->data
->flags
& MMC_DATA_READ
)
641 sdhci_read_block_pio(host
);
643 sdhci_write_block_pio(host
);
646 if (host
->blocks
== 0)
650 DBG("PIO transfer complete.\n");
653 static int sdhci_pre_dma_transfer(struct sdhci_host
*host
,
654 struct mmc_data
*data
, int cookie
)
659 * If the data buffers are already mapped, return the previous
660 * dma_map_sg() result.
662 if (data
->host_cookie
== COOKIE_PRE_MAPPED
)
663 return data
->sg_count
;
665 /* Bounce write requests to the bounce buffer */
666 if (host
->bounce_buffer
) {
667 unsigned int length
= data
->blksz
* data
->blocks
;
669 if (length
> host
->bounce_buffer_size
) {
670 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
671 mmc_hostname(host
->mmc
), length
,
672 host
->bounce_buffer_size
);
675 if (mmc_get_dma_dir(data
) == DMA_TO_DEVICE
) {
676 /* Copy the data to the bounce buffer */
677 if (host
->ops
->copy_to_bounce_buffer
) {
678 host
->ops
->copy_to_bounce_buffer(host
,
681 sg_copy_to_buffer(data
->sg
, data
->sg_len
,
682 host
->bounce_buffer
, length
);
685 /* Switch ownership to the DMA */
686 dma_sync_single_for_device(mmc_dev(host
->mmc
),
688 host
->bounce_buffer_size
,
689 mmc_get_dma_dir(data
));
690 /* Just a dummy value */
693 /* Just access the data directly from memory */
694 sg_count
= dma_map_sg(mmc_dev(host
->mmc
),
695 data
->sg
, data
->sg_len
,
696 mmc_get_dma_dir(data
));
702 data
->sg_count
= sg_count
;
703 data
->host_cookie
= cookie
;
708 static char *sdhci_kmap_atomic(struct scatterlist
*sg
)
710 return kmap_local_page(sg_page(sg
)) + sg
->offset
;
713 static void sdhci_kunmap_atomic(void *buffer
)
715 kunmap_local(buffer
);
718 void sdhci_adma_write_desc(struct sdhci_host
*host
, void **desc
,
719 dma_addr_t addr
, int len
, unsigned int cmd
)
721 struct sdhci_adma2_64_desc
*dma_desc
= *desc
;
723 /* 32-bit and 64-bit descriptors have these members in same position */
724 dma_desc
->cmd
= cpu_to_le16(cmd
);
725 dma_desc
->len
= cpu_to_le16(len
);
726 dma_desc
->addr_lo
= cpu_to_le32(lower_32_bits(addr
));
728 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
729 dma_desc
->addr_hi
= cpu_to_le32(upper_32_bits(addr
));
731 *desc
+= host
->desc_sz
;
733 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc
);
735 static inline void __sdhci_adma_write_desc(struct sdhci_host
*host
,
736 void **desc
, dma_addr_t addr
,
737 int len
, unsigned int cmd
)
739 if (host
->ops
->adma_write_desc
)
740 host
->ops
->adma_write_desc(host
, desc
, addr
, len
, cmd
);
742 sdhci_adma_write_desc(host
, desc
, addr
, len
, cmd
);
745 static void sdhci_adma_mark_end(void *desc
)
747 struct sdhci_adma2_64_desc
*dma_desc
= desc
;
749 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
750 dma_desc
->cmd
|= cpu_to_le16(ADMA2_END
);
753 static void sdhci_adma_table_pre(struct sdhci_host
*host
,
754 struct mmc_data
*data
, int sg_count
)
756 struct scatterlist
*sg
;
757 dma_addr_t addr
, align_addr
;
763 * The spec does not specify endianness of descriptor table.
764 * We currently guess that it is LE.
767 host
->sg_count
= sg_count
;
769 desc
= host
->adma_table
;
770 align
= host
->align_buffer
;
772 align_addr
= host
->align_addr
;
774 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
) {
775 addr
= sg_dma_address(sg
);
776 len
= sg_dma_len(sg
);
779 * The SDHCI specification states that ADMA addresses must
780 * be 32-bit aligned. If they aren't, then we use a bounce
781 * buffer for the (up to three) bytes that screw up the
784 offset
= (SDHCI_ADMA2_ALIGN
- (addr
& SDHCI_ADMA2_MASK
)) &
787 if (data
->flags
& MMC_DATA_WRITE
) {
788 buffer
= sdhci_kmap_atomic(sg
);
789 memcpy(align
, buffer
, offset
);
790 sdhci_kunmap_atomic(buffer
);
794 __sdhci_adma_write_desc(host
, &desc
, align_addr
,
795 offset
, ADMA2_TRAN_VALID
);
797 BUG_ON(offset
> 65536);
799 align
+= SDHCI_ADMA2_ALIGN
;
800 align_addr
+= SDHCI_ADMA2_ALIGN
;
807 * The block layer forces a minimum segment size of PAGE_SIZE,
808 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
809 * multiple descriptors, noting that the ADMA table is sized
810 * for 4KiB chunks anyway, so it will be big enough.
812 while (len
> host
->max_adma
) {
813 int n
= 32 * 1024; /* 32KiB*/
815 __sdhci_adma_write_desc(host
, &desc
, addr
, n
, ADMA2_TRAN_VALID
);
822 __sdhci_adma_write_desc(host
, &desc
, addr
, len
,
826 * If this triggers then we have a calculation bug
829 WARN_ON((desc
- host
->adma_table
) >= host
->adma_table_sz
);
832 if (host
->quirks
& SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
) {
833 /* Mark the last descriptor as the terminating descriptor */
834 if (desc
!= host
->adma_table
) {
835 desc
-= host
->desc_sz
;
836 sdhci_adma_mark_end(desc
);
839 /* Add a terminating entry - nop, end, valid */
840 __sdhci_adma_write_desc(host
, &desc
, 0, 0, ADMA2_NOP_END_VALID
);
844 static void sdhci_adma_table_post(struct sdhci_host
*host
,
845 struct mmc_data
*data
)
847 struct scatterlist
*sg
;
852 if (data
->flags
& MMC_DATA_READ
) {
853 bool has_unaligned
= false;
855 /* Do a quick scan of the SG list for any unaligned mappings */
856 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
)
857 if (sg_dma_address(sg
) & SDHCI_ADMA2_MASK
) {
858 has_unaligned
= true;
863 dma_sync_sg_for_cpu(mmc_dev(host
->mmc
), data
->sg
,
864 data
->sg_len
, DMA_FROM_DEVICE
);
866 align
= host
->align_buffer
;
868 for_each_sg(data
->sg
, sg
, host
->sg_count
, i
) {
869 if (sg_dma_address(sg
) & SDHCI_ADMA2_MASK
) {
870 size
= SDHCI_ADMA2_ALIGN
-
871 (sg_dma_address(sg
) & SDHCI_ADMA2_MASK
);
873 buffer
= sdhci_kmap_atomic(sg
);
874 memcpy(buffer
, align
, size
);
875 sdhci_kunmap_atomic(buffer
);
877 align
+= SDHCI_ADMA2_ALIGN
;
884 static void sdhci_set_adma_addr(struct sdhci_host
*host
, dma_addr_t addr
)
886 sdhci_writel(host
, lower_32_bits(addr
), SDHCI_ADMA_ADDRESS
);
887 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
888 sdhci_writel(host
, upper_32_bits(addr
), SDHCI_ADMA_ADDRESS_HI
);
891 static dma_addr_t
sdhci_sdma_address(struct sdhci_host
*host
)
893 if (host
->bounce_buffer
)
894 return host
->bounce_addr
;
896 return sg_dma_address(host
->data
->sg
);
899 static void sdhci_set_sdma_addr(struct sdhci_host
*host
, dma_addr_t addr
)
902 sdhci_set_adma_addr(host
, addr
);
904 sdhci_writel(host
, addr
, SDHCI_DMA_ADDRESS
);
907 static unsigned int sdhci_target_timeout(struct sdhci_host
*host
,
908 struct mmc_command
*cmd
,
909 struct mmc_data
*data
)
911 unsigned int target_timeout
;
915 target_timeout
= cmd
->busy_timeout
* 1000;
917 target_timeout
= DIV_ROUND_UP(data
->timeout_ns
, 1000);
918 if (host
->clock
&& data
->timeout_clks
) {
919 unsigned long long val
;
922 * data->timeout_clks is in units of clock cycles.
923 * host->clock is in Hz. target_timeout is in us.
924 * Hence, us = 1000000 * cycles / Hz. Round up.
926 val
= 1000000ULL * data
->timeout_clks
;
927 if (do_div(val
, host
->clock
))
929 target_timeout
+= val
;
933 return target_timeout
;
936 static void sdhci_calc_sw_timeout(struct sdhci_host
*host
,
937 struct mmc_command
*cmd
)
939 struct mmc_data
*data
= cmd
->data
;
940 struct mmc_host
*mmc
= host
->mmc
;
941 struct mmc_ios
*ios
= &mmc
->ios
;
942 unsigned char bus_width
= 1 << ios
->bus_width
;
948 target_timeout
= sdhci_target_timeout(host
, cmd
, data
);
949 target_timeout
*= NSEC_PER_USEC
;
953 freq
= mmc
->actual_clock
? : host
->clock
;
954 transfer_time
= (u64
)blksz
* NSEC_PER_SEC
* (8 / bus_width
);
955 do_div(transfer_time
, freq
);
956 /* multiply by '2' to account for any unknowns */
957 transfer_time
= transfer_time
* 2;
958 /* calculate timeout for the entire data */
959 host
->data_timeout
= data
->blocks
* target_timeout
+
962 host
->data_timeout
= target_timeout
;
965 if (host
->data_timeout
)
966 host
->data_timeout
+= MMC_CMD_TRANSFER_TIME
;
969 static u8
sdhci_calc_timeout(struct sdhci_host
*host
, struct mmc_command
*cmd
,
973 struct mmc_data
*data
;
974 unsigned target_timeout
, current_timeout
;
979 * If the host controller provides us with an incorrect timeout
980 * value, just skip the check and use the maximum. The hardware may take
981 * longer to time out, but that's much better than having a too-short
984 if (host
->quirks
& SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
)
985 return host
->max_timeout_count
;
987 /* Unspecified command, assume max */
989 return host
->max_timeout_count
;
992 /* Unspecified timeout, assume max */
993 if (!data
&& !cmd
->busy_timeout
)
994 return host
->max_timeout_count
;
997 target_timeout
= sdhci_target_timeout(host
, cmd
, data
);
1000 * Figure out needed cycles.
1001 * We do this in steps in order to fit inside a 32 bit int.
1002 * The first step is the minimum timeout, which will have a
1003 * minimum resolution of 6 bits:
1004 * (1) 2^13*1000 > 2^22,
1005 * (2) host->timeout_clk < 2^16
1010 current_timeout
= (1 << 13) * 1000 / host
->timeout_clk
;
1011 while (current_timeout
< target_timeout
) {
1013 current_timeout
<<= 1;
1014 if (count
> host
->max_timeout_count
) {
1015 if (!(host
->quirks2
& SDHCI_QUIRK2_DISABLE_HW_TIMEOUT
))
1016 DBG("Too large timeout 0x%x requested for CMD%d!\n",
1017 count
, cmd
->opcode
);
1018 count
= host
->max_timeout_count
;
1027 static void sdhci_set_transfer_irqs(struct sdhci_host
*host
)
1029 u32 pio_irqs
= SDHCI_INT_DATA_AVAIL
| SDHCI_INT_SPACE_AVAIL
;
1030 u32 dma_irqs
= SDHCI_INT_DMA_END
| SDHCI_INT_ADMA_ERROR
;
1032 if (host
->flags
& SDHCI_REQ_USE_DMA
)
1033 host
->ier
= (host
->ier
& ~pio_irqs
) | dma_irqs
;
1035 host
->ier
= (host
->ier
& ~dma_irqs
) | pio_irqs
;
1037 if (host
->flags
& (SDHCI_AUTO_CMD23
| SDHCI_AUTO_CMD12
))
1038 host
->ier
|= SDHCI_INT_AUTO_CMD_ERR
;
1040 host
->ier
&= ~SDHCI_INT_AUTO_CMD_ERR
;
1042 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
1043 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
1046 void sdhci_set_data_timeout_irq(struct sdhci_host
*host
, bool enable
)
1049 host
->ier
|= SDHCI_INT_DATA_TIMEOUT
;
1051 host
->ier
&= ~SDHCI_INT_DATA_TIMEOUT
;
1052 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
1053 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
1055 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq
);
1057 void __sdhci_set_timeout(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1059 bool too_big
= false;
1060 u8 count
= sdhci_calc_timeout(host
, cmd
, &too_big
);
1063 host
->quirks2
& SDHCI_QUIRK2_DISABLE_HW_TIMEOUT
) {
1064 sdhci_calc_sw_timeout(host
, cmd
);
1065 sdhci_set_data_timeout_irq(host
, false);
1066 } else if (!(host
->ier
& SDHCI_INT_DATA_TIMEOUT
)) {
1067 sdhci_set_data_timeout_irq(host
, true);
1070 sdhci_writeb(host
, count
, SDHCI_TIMEOUT_CONTROL
);
1072 EXPORT_SYMBOL_GPL(__sdhci_set_timeout
);
1074 static void sdhci_set_timeout(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1076 if (host
->ops
->set_timeout
)
1077 host
->ops
->set_timeout(host
, cmd
);
1079 __sdhci_set_timeout(host
, cmd
);
1082 void sdhci_initialize_data(struct sdhci_host
*host
, struct mmc_data
*data
)
1084 WARN_ON(host
->data
);
1087 BUG_ON(data
->blksz
* data
->blocks
> 524288);
1088 BUG_ON(data
->blksz
> host
->mmc
->max_blk_size
);
1089 BUG_ON(data
->blocks
> 65535);
1092 host
->data_early
= 0;
1093 host
->data
->bytes_xfered
= 0;
1095 EXPORT_SYMBOL_GPL(sdhci_initialize_data
);
1097 static inline void sdhci_set_block_info(struct sdhci_host
*host
,
1098 struct mmc_data
*data
)
1100 /* Set the DMA boundary value and block size */
1102 SDHCI_MAKE_BLKSZ(host
->sdma_boundary
, data
->blksz
),
1105 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
1106 * can be supported, in that case 16-bit block count register must be 0.
1108 if (host
->version
>= SDHCI_SPEC_410
&& host
->v4_mode
&&
1109 (host
->quirks2
& SDHCI_QUIRK2_USE_32BIT_BLK_CNT
)) {
1110 if (sdhci_readw(host
, SDHCI_BLOCK_COUNT
))
1111 sdhci_writew(host
, 0, SDHCI_BLOCK_COUNT
);
1112 sdhci_writew(host
, data
->blocks
, SDHCI_32BIT_BLK_CNT
);
1114 sdhci_writew(host
, data
->blocks
, SDHCI_BLOCK_COUNT
);
1118 void sdhci_prepare_dma(struct sdhci_host
*host
, struct mmc_data
*data
)
1120 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
1121 struct scatterlist
*sg
;
1122 unsigned int length_mask
, offset_mask
;
1125 host
->flags
|= SDHCI_REQ_USE_DMA
;
1128 * FIXME: This doesn't account for merging when mapping the
1131 * The assumption here being that alignment and lengths are
1132 * the same after DMA mapping to device address space.
1136 if (host
->flags
& SDHCI_USE_ADMA
) {
1137 if (host
->quirks
& SDHCI_QUIRK_32BIT_ADMA_SIZE
) {
1140 * As we use up to 3 byte chunks to work
1141 * around alignment problems, we need to
1142 * check the offset as well.
1147 if (host
->quirks
& SDHCI_QUIRK_32BIT_DMA_SIZE
)
1149 if (host
->quirks
& SDHCI_QUIRK_32BIT_DMA_ADDR
)
1153 if (unlikely(length_mask
| offset_mask
)) {
1154 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
1155 if (sg
->length
& length_mask
) {
1156 DBG("Reverting to PIO because of transfer size (%d)\n",
1158 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
1161 if (sg
->offset
& offset_mask
) {
1162 DBG("Reverting to PIO because of bad alignment\n");
1163 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
1170 sdhci_config_dma(host
);
1172 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
1173 int sg_cnt
= sdhci_pre_dma_transfer(host
, data
, COOKIE_MAPPED
);
1177 * This only happens when someone fed
1178 * us an invalid request.
1181 host
->flags
&= ~SDHCI_REQ_USE_DMA
;
1182 } else if (host
->flags
& SDHCI_USE_ADMA
) {
1183 sdhci_adma_table_pre(host
, data
, sg_cnt
);
1184 sdhci_set_adma_addr(host
, host
->adma_addr
);
1186 WARN_ON(sg_cnt
!= 1);
1187 sdhci_set_sdma_addr(host
, sdhci_sdma_address(host
));
1191 if (!(host
->flags
& SDHCI_REQ_USE_DMA
)) {
1194 flags
= SG_MITER_ATOMIC
;
1195 if (host
->data
->flags
& MMC_DATA_READ
)
1196 flags
|= SG_MITER_TO_SG
;
1198 flags
|= SG_MITER_FROM_SG
;
1199 sg_miter_start(&host
->sg_miter
, data
->sg
, data
->sg_len
, flags
);
1200 host
->blocks
= data
->blocks
;
1203 sdhci_set_transfer_irqs(host
);
1205 EXPORT_SYMBOL_GPL(sdhci_prepare_dma
);
1207 static void sdhci_prepare_data(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1209 struct mmc_data
*data
= cmd
->data
;
1211 sdhci_initialize_data(host
, data
);
1213 sdhci_prepare_dma(host
, data
);
1215 sdhci_set_block_info(host
, data
);
1218 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1220 static int sdhci_external_dma_init(struct sdhci_host
*host
)
1223 struct mmc_host
*mmc
= host
->mmc
;
1225 host
->tx_chan
= dma_request_chan(mmc_dev(mmc
), "tx");
1226 if (IS_ERR(host
->tx_chan
)) {
1227 ret
= PTR_ERR(host
->tx_chan
);
1228 if (ret
!= -EPROBE_DEFER
)
1229 pr_warn("Failed to request TX DMA channel.\n");
1230 host
->tx_chan
= NULL
;
1234 host
->rx_chan
= dma_request_chan(mmc_dev(mmc
), "rx");
1235 if (IS_ERR(host
->rx_chan
)) {
1236 if (host
->tx_chan
) {
1237 dma_release_channel(host
->tx_chan
);
1238 host
->tx_chan
= NULL
;
1241 ret
= PTR_ERR(host
->rx_chan
);
1242 if (ret
!= -EPROBE_DEFER
)
1243 pr_warn("Failed to request RX DMA channel.\n");
1244 host
->rx_chan
= NULL
;
1250 static struct dma_chan
*sdhci_external_dma_channel(struct sdhci_host
*host
,
1251 struct mmc_data
*data
)
1253 return data
->flags
& MMC_DATA_WRITE
? host
->tx_chan
: host
->rx_chan
;
1256 static int sdhci_external_dma_setup(struct sdhci_host
*host
,
1257 struct mmc_command
*cmd
)
1260 enum dma_transfer_direction dir
;
1261 struct dma_async_tx_descriptor
*desc
;
1262 struct mmc_data
*data
= cmd
->data
;
1263 struct dma_chan
*chan
;
1264 struct dma_slave_config cfg
;
1265 dma_cookie_t cookie
;
1271 memset(&cfg
, 0, sizeof(cfg
));
1272 cfg
.src_addr
= host
->mapbase
+ SDHCI_BUFFER
;
1273 cfg
.dst_addr
= host
->mapbase
+ SDHCI_BUFFER
;
1274 cfg
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1275 cfg
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
1276 cfg
.src_maxburst
= data
->blksz
/ 4;
1277 cfg
.dst_maxburst
= data
->blksz
/ 4;
1279 /* Sanity check: all the SG entries must be aligned by block size. */
1280 for (i
= 0; i
< data
->sg_len
; i
++) {
1281 if ((data
->sg
+ i
)->length
% data
->blksz
)
1285 chan
= sdhci_external_dma_channel(host
, data
);
1287 ret
= dmaengine_slave_config(chan
, &cfg
);
1291 sg_cnt
= sdhci_pre_dma_transfer(host
, data
, COOKIE_MAPPED
);
1295 dir
= data
->flags
& MMC_DATA_WRITE
? DMA_MEM_TO_DEV
: DMA_DEV_TO_MEM
;
1296 desc
= dmaengine_prep_slave_sg(chan
, data
->sg
, data
->sg_len
, dir
,
1297 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
1301 desc
->callback
= NULL
;
1302 desc
->callback_param
= NULL
;
1304 cookie
= dmaengine_submit(desc
);
1305 if (dma_submit_error(cookie
))
1311 static void sdhci_external_dma_release(struct sdhci_host
*host
)
1313 if (host
->tx_chan
) {
1314 dma_release_channel(host
->tx_chan
);
1315 host
->tx_chan
= NULL
;
1318 if (host
->rx_chan
) {
1319 dma_release_channel(host
->rx_chan
);
1320 host
->rx_chan
= NULL
;
1323 sdhci_switch_external_dma(host
, false);
1326 static void __sdhci_external_dma_prepare_data(struct sdhci_host
*host
,
1327 struct mmc_command
*cmd
)
1329 struct mmc_data
*data
= cmd
->data
;
1331 sdhci_initialize_data(host
, data
);
1333 host
->flags
|= SDHCI_REQ_USE_DMA
;
1334 sdhci_set_transfer_irqs(host
);
1336 sdhci_set_block_info(host
, data
);
1339 static void sdhci_external_dma_prepare_data(struct sdhci_host
*host
,
1340 struct mmc_command
*cmd
)
1342 if (!sdhci_external_dma_setup(host
, cmd
)) {
1343 __sdhci_external_dma_prepare_data(host
, cmd
);
1345 sdhci_external_dma_release(host
);
1346 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1347 mmc_hostname(host
->mmc
));
1348 sdhci_prepare_data(host
, cmd
);
1352 static void sdhci_external_dma_pre_transfer(struct sdhci_host
*host
,
1353 struct mmc_command
*cmd
)
1355 struct dma_chan
*chan
;
1360 chan
= sdhci_external_dma_channel(host
, cmd
->data
);
1362 dma_async_issue_pending(chan
);
1367 static inline int sdhci_external_dma_init(struct sdhci_host
*host
)
1372 static inline void sdhci_external_dma_release(struct sdhci_host
*host
)
1376 static inline void sdhci_external_dma_prepare_data(struct sdhci_host
*host
,
1377 struct mmc_command
*cmd
)
1379 /* This should never happen */
1383 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host
*host
,
1384 struct mmc_command
*cmd
)
1388 static inline struct dma_chan
*sdhci_external_dma_channel(struct sdhci_host
*host
,
1389 struct mmc_data
*data
)
1396 void sdhci_switch_external_dma(struct sdhci_host
*host
, bool en
)
1398 host
->use_external_dma
= en
;
1400 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma
);
1402 static inline bool sdhci_auto_cmd12(struct sdhci_host
*host
,
1403 struct mmc_request
*mrq
)
1405 return !mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD12
) &&
1406 !mrq
->cap_cmd_during_tfr
;
1409 static inline bool sdhci_auto_cmd23(struct sdhci_host
*host
,
1410 struct mmc_request
*mrq
)
1412 return mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD23
);
1415 static inline bool sdhci_manual_cmd23(struct sdhci_host
*host
,
1416 struct mmc_request
*mrq
)
1418 return mrq
->sbc
&& !(host
->flags
& SDHCI_AUTO_CMD23
);
1421 static inline void sdhci_auto_cmd_select(struct sdhci_host
*host
,
1422 struct mmc_command
*cmd
,
1425 bool use_cmd12
= sdhci_auto_cmd12(host
, cmd
->mrq
) &&
1426 (cmd
->opcode
!= SD_IO_RW_EXTENDED
);
1427 bool use_cmd23
= sdhci_auto_cmd23(host
, cmd
->mrq
);
1431 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1432 * Select' is recommended rather than use of 'Auto CMD12
1433 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1434 * here because some controllers (e.g sdhci-of-dwmshc) expect it.
1436 if (host
->version
>= SDHCI_SPEC_410
&& host
->v4_mode
&&
1437 (use_cmd12
|| use_cmd23
)) {
1438 *mode
|= SDHCI_TRNS_AUTO_SEL
;
1440 ctrl2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
1442 ctrl2
|= SDHCI_CMD23_ENABLE
;
1444 ctrl2
&= ~SDHCI_CMD23_ENABLE
;
1445 sdhci_writew(host
, ctrl2
, SDHCI_HOST_CONTROL2
);
1451 * If we are sending CMD23, CMD12 never gets sent
1452 * on successful completion (so no Auto-CMD12).
1455 *mode
|= SDHCI_TRNS_AUTO_CMD12
;
1457 *mode
|= SDHCI_TRNS_AUTO_CMD23
;
1460 static void sdhci_set_transfer_mode(struct sdhci_host
*host
,
1461 struct mmc_command
*cmd
)
1464 struct mmc_data
*data
= cmd
->data
;
1468 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD
) {
1469 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1470 if (!mmc_op_tuning(cmd
->opcode
))
1471 sdhci_writew(host
, 0x0, SDHCI_TRANSFER_MODE
);
1473 /* clear Auto CMD settings for no data CMDs */
1474 mode
= sdhci_readw(host
, SDHCI_TRANSFER_MODE
);
1475 sdhci_writew(host
, mode
& ~(SDHCI_TRNS_AUTO_CMD12
|
1476 SDHCI_TRNS_AUTO_CMD23
), SDHCI_TRANSFER_MODE
);
1481 WARN_ON(!host
->data
);
1483 if (!(host
->quirks2
& SDHCI_QUIRK2_SUPPORT_SINGLE
))
1484 mode
= SDHCI_TRNS_BLK_CNT_EN
;
1486 if (mmc_op_multi(cmd
->opcode
) || data
->blocks
> 1) {
1487 mode
= SDHCI_TRNS_BLK_CNT_EN
| SDHCI_TRNS_MULTI
;
1488 sdhci_auto_cmd_select(host
, cmd
, &mode
);
1489 if (sdhci_auto_cmd23(host
, cmd
->mrq
))
1490 sdhci_writel(host
, cmd
->mrq
->sbc
->arg
, SDHCI_ARGUMENT2
);
1493 if (data
->flags
& MMC_DATA_READ
)
1494 mode
|= SDHCI_TRNS_READ
;
1495 if (host
->flags
& SDHCI_REQ_USE_DMA
)
1496 mode
|= SDHCI_TRNS_DMA
;
1498 sdhci_writew(host
, mode
, SDHCI_TRANSFER_MODE
);
1501 bool sdhci_needs_reset(struct sdhci_host
*host
, struct mmc_request
*mrq
)
1503 return (!(host
->flags
& SDHCI_DEVICE_DEAD
) &&
1504 ((mrq
->cmd
&& mrq
->cmd
->error
) ||
1505 (mrq
->sbc
&& mrq
->sbc
->error
) ||
1506 (mrq
->data
&& mrq
->data
->stop
&& mrq
->data
->stop
->error
) ||
1507 (host
->quirks
& SDHCI_QUIRK_RESET_AFTER_REQUEST
)));
1509 EXPORT_SYMBOL_GPL(sdhci_needs_reset
);
1511 static void sdhci_set_mrq_done(struct sdhci_host
*host
, struct mmc_request
*mrq
)
1515 for (i
= 0; i
< SDHCI_MAX_MRQS
; i
++) {
1516 if (host
->mrqs_done
[i
] == mrq
) {
1522 for (i
= 0; i
< SDHCI_MAX_MRQS
; i
++) {
1523 if (!host
->mrqs_done
[i
]) {
1524 host
->mrqs_done
[i
] = mrq
;
1529 WARN_ON(i
>= SDHCI_MAX_MRQS
);
1532 void __sdhci_finish_mrq(struct sdhci_host
*host
, struct mmc_request
*mrq
)
1534 if (host
->cmd
&& host
->cmd
->mrq
== mrq
)
1537 if (host
->data_cmd
&& host
->data_cmd
->mrq
== mrq
)
1538 host
->data_cmd
= NULL
;
1540 if (host
->deferred_cmd
&& host
->deferred_cmd
->mrq
== mrq
)
1541 host
->deferred_cmd
= NULL
;
1543 if (host
->data
&& host
->data
->mrq
== mrq
)
1546 if (sdhci_needs_reset(host
, mrq
))
1547 host
->pending_reset
= true;
1549 sdhci_set_mrq_done(host
, mrq
);
1551 sdhci_del_timer(host
, mrq
);
1553 if (!sdhci_has_requests(host
))
1554 sdhci_led_deactivate(host
);
1556 EXPORT_SYMBOL_GPL(__sdhci_finish_mrq
);
1558 void sdhci_finish_mrq(struct sdhci_host
*host
, struct mmc_request
*mrq
)
1560 __sdhci_finish_mrq(host
, mrq
);
1562 queue_work(host
->complete_wq
, &host
->complete_work
);
1564 EXPORT_SYMBOL_GPL(sdhci_finish_mrq
);
1566 void __sdhci_finish_data_common(struct sdhci_host
*host
, bool defer_reset
)
1568 struct mmc_command
*data_cmd
= host
->data_cmd
;
1569 struct mmc_data
*data
= host
->data
;
1572 host
->data_cmd
= NULL
;
1575 * The controller needs a reset of internal state machines upon error
1580 host
->pending_reset
= true;
1581 else if (!host
->cmd
|| host
->cmd
== data_cmd
)
1582 sdhci_reset_for(host
, REQUEST_ERROR
);
1584 sdhci_reset_for(host
, REQUEST_ERROR_DATA_ONLY
);
1587 if ((host
->flags
& (SDHCI_REQ_USE_DMA
| SDHCI_USE_ADMA
)) ==
1588 (SDHCI_REQ_USE_DMA
| SDHCI_USE_ADMA
))
1589 sdhci_adma_table_post(host
, data
);
1592 * The specification states that the block count register must
1593 * be updated, but it does not specify at what point in the
1594 * data flow. That makes the register entirely useless to read
1595 * back so we have to assume that nothing made it to the card
1596 * in the event of an error.
1599 data
->bytes_xfered
= 0;
1601 data
->bytes_xfered
= data
->blksz
* data
->blocks
;
1603 EXPORT_SYMBOL_GPL(__sdhci_finish_data_common
);
1605 static void __sdhci_finish_data(struct sdhci_host
*host
, bool sw_data_timeout
)
1607 struct mmc_data
*data
= host
->data
;
1609 __sdhci_finish_data_common(host
, false);
1612 * Need to send CMD12 if -
1613 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1614 * b) error in multiblock transfer
1617 ((!data
->mrq
->sbc
&& !sdhci_auto_cmd12(host
, data
->mrq
)) ||
1620 * 'cap_cmd_during_tfr' request must not use the command line
1621 * after mmc_command_done() has been called. It is upper layer's
1622 * responsibility to send the stop command if required.
1624 if (data
->mrq
->cap_cmd_during_tfr
) {
1625 __sdhci_finish_mrq(host
, data
->mrq
);
1627 /* Avoid triggering warning in sdhci_send_command() */
1629 if (!sdhci_send_command(host
, data
->stop
)) {
1630 if (sw_data_timeout
) {
1632 * This is anyway a sw data timeout, so
1635 data
->stop
->error
= -EIO
;
1636 __sdhci_finish_mrq(host
, data
->mrq
);
1638 WARN_ON(host
->deferred_cmd
);
1639 host
->deferred_cmd
= data
->stop
;
1644 __sdhci_finish_mrq(host
, data
->mrq
);
1648 static void sdhci_finish_data(struct sdhci_host
*host
)
1650 __sdhci_finish_data(host
, false);
1653 static bool sdhci_send_command(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1657 unsigned long timeout
;
1661 /* Initially, a command has no error */
1664 if ((host
->quirks2
& SDHCI_QUIRK2_STOP_WITH_TC
) &&
1665 cmd
->opcode
== MMC_STOP_TRANSMISSION
)
1666 cmd
->flags
|= MMC_RSP_BUSY
;
1668 mask
= SDHCI_CMD_INHIBIT
;
1669 if (sdhci_data_line_cmd(cmd
))
1670 mask
|= SDHCI_DATA_INHIBIT
;
1672 /* We shouldn't wait for data inihibit for stop commands, even
1673 though they might use busy signaling */
1674 if (cmd
->mrq
->data
&& (cmd
== cmd
->mrq
->data
->stop
))
1675 mask
&= ~SDHCI_DATA_INHIBIT
;
1677 if (sdhci_readl(host
, SDHCI_PRESENT_STATE
) & mask
)
1681 host
->data_timeout
= 0;
1682 if (sdhci_data_line_cmd(cmd
)) {
1683 WARN_ON(host
->data_cmd
);
1684 host
->data_cmd
= cmd
;
1685 sdhci_set_timeout(host
, cmd
);
1689 if (host
->use_external_dma
)
1690 sdhci_external_dma_prepare_data(host
, cmd
);
1692 sdhci_prepare_data(host
, cmd
);
1695 sdhci_writel(host
, cmd
->arg
, SDHCI_ARGUMENT
);
1697 sdhci_set_transfer_mode(host
, cmd
);
1699 if ((cmd
->flags
& MMC_RSP_136
) && (cmd
->flags
& MMC_RSP_BUSY
)) {
1700 WARN_ONCE(1, "Unsupported response type!\n");
1702 * This does not happen in practice because 136-bit response
1703 * commands never have busy waiting, so rather than complicate
1704 * the error path, just remove busy waiting and continue.
1706 cmd
->flags
&= ~MMC_RSP_BUSY
;
1709 if (!(cmd
->flags
& MMC_RSP_PRESENT
))
1710 flags
= SDHCI_CMD_RESP_NONE
;
1711 else if (cmd
->flags
& MMC_RSP_136
)
1712 flags
= SDHCI_CMD_RESP_LONG
;
1713 else if (cmd
->flags
& MMC_RSP_BUSY
)
1714 flags
= SDHCI_CMD_RESP_SHORT_BUSY
;
1716 flags
= SDHCI_CMD_RESP_SHORT
;
1718 if (cmd
->flags
& MMC_RSP_CRC
)
1719 flags
|= SDHCI_CMD_CRC
;
1720 if (cmd
->flags
& MMC_RSP_OPCODE
)
1721 flags
|= SDHCI_CMD_INDEX
;
1723 /* CMD19 is special in that the Data Present Select should be set */
1724 if (cmd
->data
|| mmc_op_tuning(cmd
->opcode
))
1725 flags
|= SDHCI_CMD_DATA
;
1728 if (host
->data_timeout
)
1729 timeout
+= nsecs_to_jiffies(host
->data_timeout
);
1730 else if (!cmd
->data
&& cmd
->busy_timeout
> 9000)
1731 timeout
+= DIV_ROUND_UP(cmd
->busy_timeout
, 1000) * HZ
+ HZ
;
1734 sdhci_mod_timer(host
, cmd
->mrq
, timeout
);
1736 if (host
->use_external_dma
)
1737 sdhci_external_dma_pre_transfer(host
, cmd
);
1739 sdhci_writew(host
, SDHCI_MAKE_CMD(cmd
->opcode
, flags
), SDHCI_COMMAND
);
1744 bool sdhci_present_error(struct sdhci_host
*host
,
1745 struct mmc_command
*cmd
, bool present
)
1747 if (!present
|| host
->flags
& SDHCI_DEVICE_DEAD
) {
1748 cmd
->error
= -ENOMEDIUM
;
1754 EXPORT_SYMBOL_GPL(sdhci_present_error
);
1756 static bool sdhci_send_command_retry(struct sdhci_host
*host
,
1757 struct mmc_command
*cmd
,
1758 unsigned long flags
)
1759 __releases(host
->lock
)
1760 __acquires(host
->lock
)
1762 struct mmc_command
*deferred_cmd
= host
->deferred_cmd
;
1763 int timeout
= 10; /* Approx. 10 ms */
1766 while (!sdhci_send_command(host
, cmd
)) {
1768 pr_err("%s: Controller never released inhibit bit(s).\n",
1769 mmc_hostname(host
->mmc
));
1770 sdhci_err_stats_inc(host
, CTRL_TIMEOUT
);
1771 sdhci_dumpregs(host
);
1776 spin_unlock_irqrestore(&host
->lock
, flags
);
1778 usleep_range(1000, 1250);
1780 present
= host
->mmc
->ops
->get_cd(host
->mmc
);
1782 spin_lock_irqsave(&host
->lock
, flags
);
1784 /* A deferred command might disappear, handle that */
1785 if (cmd
== deferred_cmd
&& cmd
!= host
->deferred_cmd
)
1788 if (sdhci_present_error(host
, cmd
, present
))
1792 if (cmd
== host
->deferred_cmd
)
1793 host
->deferred_cmd
= NULL
;
1798 static void sdhci_read_rsp_136(struct sdhci_host
*host
, struct mmc_command
*cmd
)
1802 for (i
= 0; i
< 4; i
++) {
1803 reg
= SDHCI_RESPONSE
+ (3 - i
) * 4;
1804 cmd
->resp
[i
] = sdhci_readl(host
, reg
);
1807 if (host
->quirks2
& SDHCI_QUIRK2_RSP_136_HAS_CRC
)
1810 /* CRC is stripped so we need to do some shifting */
1811 for (i
= 0; i
< 4; i
++) {
1814 cmd
->resp
[i
] |= cmd
->resp
[i
+ 1] >> 24;
1818 static void sdhci_finish_command(struct sdhci_host
*host
)
1820 struct mmc_command
*cmd
= host
->cmd
;
1824 if (cmd
->flags
& MMC_RSP_PRESENT
) {
1825 if (cmd
->flags
& MMC_RSP_136
) {
1826 sdhci_read_rsp_136(host
, cmd
);
1828 cmd
->resp
[0] = sdhci_readl(host
, SDHCI_RESPONSE
);
1832 if (cmd
->mrq
->cap_cmd_during_tfr
&& cmd
== cmd
->mrq
->cmd
)
1833 mmc_command_done(host
->mmc
, cmd
->mrq
);
1836 * The host can send and interrupt when the busy state has
1837 * ended, allowing us to wait without wasting CPU cycles.
1838 * The busy signal uses DAT0 so this is similar to waiting
1839 * for data to complete.
1841 * Note: The 1.0 specification is a bit ambiguous about this
1842 * feature so there might be some problems with older
1845 if (cmd
->flags
& MMC_RSP_BUSY
) {
1847 DBG("Cannot wait for busy signal when also doing a data transfer");
1848 } else if (!(host
->quirks
& SDHCI_QUIRK_NO_BUSY_IRQ
) &&
1849 cmd
== host
->data_cmd
) {
1850 /* Command complete before busy is ended */
1855 /* Finished CMD23, now send actual command. */
1856 if (cmd
== cmd
->mrq
->sbc
) {
1857 if (!sdhci_send_command(host
, cmd
->mrq
->cmd
)) {
1858 WARN_ON(host
->deferred_cmd
);
1859 host
->deferred_cmd
= cmd
->mrq
->cmd
;
1863 /* Processed actual command. */
1864 if (host
->data
&& host
->data_early
)
1865 sdhci_finish_data(host
);
1868 __sdhci_finish_mrq(host
, cmd
->mrq
);
1872 static u16
sdhci_get_preset_value(struct sdhci_host
*host
)
1876 switch (host
->timing
) {
1877 case MMC_TIMING_MMC_HS
:
1878 case MMC_TIMING_SD_HS
:
1879 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_HIGH_SPEED
);
1881 case MMC_TIMING_UHS_SDR12
:
1882 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR12
);
1884 case MMC_TIMING_UHS_SDR25
:
1885 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR25
);
1887 case MMC_TIMING_UHS_SDR50
:
1888 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR50
);
1890 case MMC_TIMING_UHS_SDR104
:
1891 case MMC_TIMING_MMC_HS200
:
1892 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR104
);
1894 case MMC_TIMING_UHS_DDR50
:
1895 case MMC_TIMING_MMC_DDR52
:
1896 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_DDR50
);
1898 case MMC_TIMING_MMC_HS400
:
1899 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_HS400
);
1901 case MMC_TIMING_UHS2_SPEED_A
:
1902 case MMC_TIMING_UHS2_SPEED_A_HD
:
1903 case MMC_TIMING_UHS2_SPEED_B
:
1904 case MMC_TIMING_UHS2_SPEED_B_HD
:
1905 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_UHS2
);
1908 pr_warn("%s: Invalid UHS-I mode selected\n",
1909 mmc_hostname(host
->mmc
));
1910 preset
= sdhci_readw(host
, SDHCI_PRESET_FOR_SDR12
);
1916 u16
sdhci_calc_clk(struct sdhci_host
*host
, unsigned int clock
,
1917 unsigned int *actual_clock
)
1919 int div
= 0; /* Initialized for compiler warning */
1920 int real_div
= div
, clk_mul
= 1;
1922 bool switch_base_clk
= false;
1924 if (host
->version
>= SDHCI_SPEC_300
) {
1925 if (host
->preset_enabled
) {
1928 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
1929 pre_val
= sdhci_get_preset_value(host
);
1930 div
= FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK
, pre_val
);
1931 if (host
->clk_mul
&&
1932 (pre_val
& SDHCI_PRESET_CLKGEN_SEL
)) {
1933 clk
= SDHCI_PROG_CLOCK_MODE
;
1935 clk_mul
= host
->clk_mul
;
1937 real_div
= max_t(int, 1, div
<< 1);
1943 * Check if the Host Controller supports Programmable Clock
1946 if (host
->clk_mul
) {
1947 for (div
= 1; div
<= 1024; div
++) {
1948 if ((host
->max_clk
* host
->clk_mul
/ div
)
1952 if ((host
->max_clk
* host
->clk_mul
/ div
) <= clock
) {
1954 * Set Programmable Clock Mode in the Clock
1957 clk
= SDHCI_PROG_CLOCK_MODE
;
1959 clk_mul
= host
->clk_mul
;
1963 * Divisor can be too small to reach clock
1964 * speed requirement. Then use the base clock.
1966 switch_base_clk
= true;
1970 if (!host
->clk_mul
|| switch_base_clk
) {
1971 /* Version 3.00 divisors must be a multiple of 2. */
1972 if (host
->max_clk
<= clock
)
1975 for (div
= 2; div
< SDHCI_MAX_DIV_SPEC_300
;
1977 if ((host
->max_clk
/ div
) <= clock
)
1983 if ((host
->quirks2
& SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN
)
1984 && !div
&& host
->max_clk
<= 25000000)
1988 /* Version 2.00 divisors must be a power of 2. */
1989 for (div
= 1; div
< SDHCI_MAX_DIV_SPEC_200
; div
*= 2) {
1990 if ((host
->max_clk
/ div
) <= clock
)
1999 *actual_clock
= (host
->max_clk
* clk_mul
) / real_div
;
2000 clk
|= (div
& SDHCI_DIV_MASK
) << SDHCI_DIVIDER_SHIFT
;
2001 clk
|= ((div
& SDHCI_DIV_HI_MASK
) >> SDHCI_DIV_MASK_LEN
)
2002 << SDHCI_DIVIDER_HI_SHIFT
;
2006 EXPORT_SYMBOL_GPL(sdhci_calc_clk
);
2008 void sdhci_enable_clk(struct sdhci_host
*host
, u16 clk
)
2012 clk
|= SDHCI_CLOCK_INT_EN
;
2013 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
2015 /* Wait max 150 ms */
2016 timeout
= ktime_add_ms(ktime_get(), 150);
2018 bool timedout
= ktime_after(ktime_get(), timeout
);
2020 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
2021 if (clk
& SDHCI_CLOCK_INT_STABLE
)
2024 pr_err("%s: Internal clock never stabilised.\n",
2025 mmc_hostname(host
->mmc
));
2026 sdhci_err_stats_inc(host
, CTRL_TIMEOUT
);
2027 sdhci_dumpregs(host
);
2033 if (host
->version
>= SDHCI_SPEC_410
&& host
->v4_mode
) {
2034 clk
|= SDHCI_CLOCK_PLL_EN
;
2035 clk
&= ~SDHCI_CLOCK_INT_STABLE
;
2036 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
2038 /* Wait max 150 ms */
2039 timeout
= ktime_add_ms(ktime_get(), 150);
2041 bool timedout
= ktime_after(ktime_get(), timeout
);
2043 clk
= sdhci_readw(host
, SDHCI_CLOCK_CONTROL
);
2044 if (clk
& SDHCI_CLOCK_INT_STABLE
)
2047 pr_err("%s: PLL clock never stabilised.\n",
2048 mmc_hostname(host
->mmc
));
2049 sdhci_err_stats_inc(host
, CTRL_TIMEOUT
);
2050 sdhci_dumpregs(host
);
2057 clk
|= SDHCI_CLOCK_CARD_EN
;
2058 sdhci_writew(host
, clk
, SDHCI_CLOCK_CONTROL
);
2060 EXPORT_SYMBOL_GPL(sdhci_enable_clk
);
2062 void sdhci_set_clock(struct sdhci_host
*host
, unsigned int clock
)
2066 host
->mmc
->actual_clock
= 0;
2068 sdhci_writew(host
, 0, SDHCI_CLOCK_CONTROL
);
2073 clk
= sdhci_calc_clk(host
, clock
, &host
->mmc
->actual_clock
);
2074 sdhci_enable_clk(host
, clk
);
2076 EXPORT_SYMBOL_GPL(sdhci_set_clock
);
2078 static void sdhci_set_power_reg(struct sdhci_host
*host
, unsigned char mode
,
2081 struct mmc_host
*mmc
= host
->mmc
;
2083 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, vdd
);
2085 if (mode
!= MMC_POWER_OFF
)
2086 sdhci_writeb(host
, SDHCI_POWER_ON
, SDHCI_POWER_CONTROL
);
2088 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
2091 unsigned short sdhci_get_vdd_value(unsigned short vdd
)
2094 case MMC_VDD_165_195
:
2096 * Without a regulator, SDHCI does not support 2.0v
2097 * so we only get here if the driver deliberately
2098 * added the 2.0v range to ocr_avail. Map it to 1.8v
2099 * for the purpose of turning on the power.
2102 return SDHCI_POWER_180
;
2105 return SDHCI_POWER_300
;
2109 * 3.4V ~ 3.6V are valid only for those platforms where it's
2110 * known that the voltage range is supported by hardware.
2114 return SDHCI_POWER_330
;
2119 EXPORT_SYMBOL_GPL(sdhci_get_vdd_value
);
2121 void sdhci_set_power_noreg(struct sdhci_host
*host
, unsigned char mode
,
2126 if (mode
!= MMC_POWER_OFF
) {
2127 pwr
= sdhci_get_vdd_value(vdd
);
2129 WARN(1, "%s: Invalid vdd %#x\n",
2130 mmc_hostname(host
->mmc
), vdd
);
2134 if (host
->pwr
== pwr
)
2140 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
2141 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
2142 sdhci_runtime_pm_bus_off(host
);
2145 * Spec says that we should clear the power reg before setting
2146 * a new value. Some controllers don't seem to like this though.
2148 if (!(host
->quirks
& SDHCI_QUIRK_SINGLE_POWER_WRITE
))
2149 sdhci_writeb(host
, 0, SDHCI_POWER_CONTROL
);
2152 * At least the Marvell CaFe chip gets confused if we set the
2153 * voltage and set turn on power at the same time, so set the
2156 if (host
->quirks
& SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER
)
2157 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
2159 pwr
|= SDHCI_POWER_ON
;
2161 sdhci_writeb(host
, pwr
, SDHCI_POWER_CONTROL
);
2163 if (host
->quirks2
& SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON
)
2164 sdhci_runtime_pm_bus_on(host
);
2167 * Some controllers need an extra 10ms delay of 10ms before
2168 * they can apply clock after applying power
2170 if (host
->quirks
& SDHCI_QUIRK_DELAY_AFTER_POWER
)
2174 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg
);
2176 void sdhci_set_power(struct sdhci_host
*host
, unsigned char mode
,
2179 if (IS_ERR(host
->mmc
->supply
.vmmc
))
2180 sdhci_set_power_noreg(host
, mode
, vdd
);
2182 sdhci_set_power_reg(host
, mode
, vdd
);
2184 EXPORT_SYMBOL_GPL(sdhci_set_power
);
2187 * Some controllers need to configure a valid bus voltage on their power
2188 * register regardless of whether an external regulator is taking care of power
2189 * supply. This helper function takes care of it if set as the controller's
2190 * sdhci_ops.set_power callback.
2192 void sdhci_set_power_and_bus_voltage(struct sdhci_host
*host
,
2196 if (!IS_ERR(host
->mmc
->supply
.vmmc
)) {
2197 struct mmc_host
*mmc
= host
->mmc
;
2199 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, vdd
);
2201 sdhci_set_power_noreg(host
, mode
, vdd
);
2203 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage
);
2205 /*****************************************************************************\
2209 \*****************************************************************************/
2211 void sdhci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
2213 struct sdhci_host
*host
= mmc_priv(mmc
);
2214 struct mmc_command
*cmd
;
2215 unsigned long flags
;
2218 /* Firstly check card presence */
2219 present
= mmc
->ops
->get_cd(mmc
);
2221 spin_lock_irqsave(&host
->lock
, flags
);
2223 sdhci_led_activate(host
);
2225 if (sdhci_present_error(host
, mrq
->cmd
, present
))
2228 cmd
= sdhci_manual_cmd23(host
, mrq
) ? mrq
->sbc
: mrq
->cmd
;
2230 if (!sdhci_send_command_retry(host
, cmd
, flags
))
2233 spin_unlock_irqrestore(&host
->lock
, flags
);
2238 sdhci_finish_mrq(host
, mrq
);
2239 spin_unlock_irqrestore(&host
->lock
, flags
);
2241 EXPORT_SYMBOL_GPL(sdhci_request
);
2243 int sdhci_request_atomic(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
2245 struct sdhci_host
*host
= mmc_priv(mmc
);
2246 struct mmc_command
*cmd
;
2247 unsigned long flags
;
2250 spin_lock_irqsave(&host
->lock
, flags
);
2252 if (sdhci_present_error(host
, mrq
->cmd
, true)) {
2253 sdhci_finish_mrq(host
, mrq
);
2257 cmd
= sdhci_manual_cmd23(host
, mrq
) ? mrq
->sbc
: mrq
->cmd
;
2260 * The HSQ may send a command in interrupt context without polling
2261 * the busy signaling, which means we should return BUSY if controller
2262 * has not released inhibit bits to allow HSQ trying to send request
2263 * again in non-atomic context. So we should not finish this request
2266 if (!sdhci_send_command(host
, cmd
))
2269 sdhci_led_activate(host
);
2272 spin_unlock_irqrestore(&host
->lock
, flags
);
2275 EXPORT_SYMBOL_GPL(sdhci_request_atomic
);
2277 void sdhci_set_bus_width(struct sdhci_host
*host
, int width
)
2281 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
2282 if (width
== MMC_BUS_WIDTH_8
) {
2283 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
2284 ctrl
|= SDHCI_CTRL_8BITBUS
;
2286 if (host
->mmc
->caps
& MMC_CAP_8_BIT_DATA
)
2287 ctrl
&= ~SDHCI_CTRL_8BITBUS
;
2288 if (width
== MMC_BUS_WIDTH_4
)
2289 ctrl
|= SDHCI_CTRL_4BITBUS
;
2291 ctrl
&= ~SDHCI_CTRL_4BITBUS
;
2293 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
2295 EXPORT_SYMBOL_GPL(sdhci_set_bus_width
);
2297 void sdhci_set_uhs_signaling(struct sdhci_host
*host
, unsigned timing
)
2301 ctrl_2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2302 /* Select Bus Speed Mode for host */
2303 ctrl_2
&= ~SDHCI_CTRL_UHS_MASK
;
2304 if ((timing
== MMC_TIMING_MMC_HS200
) ||
2305 (timing
== MMC_TIMING_UHS_SDR104
))
2306 ctrl_2
|= SDHCI_CTRL_UHS_SDR104
;
2307 else if (timing
== MMC_TIMING_UHS_SDR12
)
2308 ctrl_2
|= SDHCI_CTRL_UHS_SDR12
;
2309 else if (timing
== MMC_TIMING_UHS_SDR25
)
2310 ctrl_2
|= SDHCI_CTRL_UHS_SDR25
;
2311 else if (timing
== MMC_TIMING_UHS_SDR50
)
2312 ctrl_2
|= SDHCI_CTRL_UHS_SDR50
;
2313 else if ((timing
== MMC_TIMING_UHS_DDR50
) ||
2314 (timing
== MMC_TIMING_MMC_DDR52
))
2315 ctrl_2
|= SDHCI_CTRL_UHS_DDR50
;
2316 else if (timing
== MMC_TIMING_MMC_HS400
)
2317 ctrl_2
|= SDHCI_CTRL_HS400
; /* Non-standard */
2318 sdhci_writew(host
, ctrl_2
, SDHCI_HOST_CONTROL2
);
2320 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling
);
static bool sdhci_timing_has_preset(unsigned char timing)
{
	switch (timing) {
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		return true;
	}
	return false;
}

static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
{
	return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
	       sdhci_timing_has_preset(timing);
}

static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
{
	/*
	 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
	 * Frequency. Check if preset values need to be enabled, or the Driver
	 * Strength needs updating. Note, clock changes are handled separately.
	 */
	return !host->preset_enabled &&
	       (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
}
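/*
 * sdhci_set_ios_common() - the portion of .set_ios handling shared with
 * drivers that override .set_ios: controller reset on power off, disabling
 * preset values on power up, and (re)programming the bus clock together with
 * the data timeout clock that depends on it.
 */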
void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = mmc->actual_clock ?
						mmc->actual_clock / 1000 :
						host->clock / 1000;
			mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			mmc->max_busy_timeout /= host->timeout_clk;
		}
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
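/*
 * sdhci_set_ios() - full .set_ios callback: power, bus width, High Speed
 * Enable, driver strength, UHS signaling and preset value handling. The SD
 * clock is gated while Host Control is reprogrammed to avoid glitches and
 * re-enabled at the end.
 */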
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	bool reinit_uhs = host->reinit_uhs;
	bool turning_on_clk;
	u8 ctrl;

	host->reinit_uhs = false;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	turning_on_clk = ios->clock != host->clock && ios->clock && !host->clock;

	sdhci_set_ios_common(mmc, ios);

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	/*
	 * Special case to avoid multiple clock changes during voltage
	 * switching.
	 */
	if (!reinit_uhs &&
	    turning_on_clk &&
	    host->timing == ios->timing &&
	    host->version >= SDHCI_SPEC_300 &&
	    !sdhci_presetable_values_change(host, ios))
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/*
		 * According to SDHCI Spec v3.00, if the Preset Value
		 * Enable in the Host Control 2 register is set, we
		 * need to reset SD Clock Enable before changing High
		 * Speed Enable to avoid generating clock glitches.
		 */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_CARD_EN) {
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
		}

		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

		if (!host->preset_enabled) {
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
			host->drv_type = ios->drv_type;
		}

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (sdhci_preset_needed(host, ios->timing)) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
						  preset);
			host->drv_type = ios->drv_type;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
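/*
 * Card-detect helpers: a CD GPIO, if present, takes precedence over the
 * controller's native present-state bit; polling hosts and non-removable
 * slots always report the card as present.
 */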
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined it takes precedence
	 * over built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
int sdhci_get_cd_nogpio(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
out:
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	bool allow_invert = false;
	int is_readonly;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		is_readonly = 0;
	} else if (host->ops->get_ro) {
		is_readonly = host->ops->get_ro(host);
	} else if (mmc_can_gpio_ro(mmc)) {
		is_readonly = mmc_gpio_get_ro(mmc);
		/* Do not invert twice */
		allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
	} else {
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);
		allow_invert = true;
	}

	if (is_readonly >= 0 &&
	    allow_invert &&
	    (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
		is_readonly = !is_readonly;

	return is_readonly;
}
EXPORT_SYMBOL_GPL(sdhci_get_ro);
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
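/*
 * SDIO card interrupt handling: the card interrupt is enabled or disabled by
 * toggling SDHCI_INT_CARD_INT in the cached interrupt mask and writing it to
 * both the status-enable and signal-enable registers under host->lock.
 */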
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
}
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(mmc_dev(mmc));
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	sdhci_enable_sdio_irq_nolock(host, true);
	spin_unlock_irqrestore(&host->lock, flags);
}
2636 int sdhci_start_signal_voltage_switch(struct mmc_host
*mmc
,
2637 struct mmc_ios
*ios
)
2639 struct sdhci_host
*host
= mmc_priv(mmc
);
2644 * Signal Voltage Switching is only applicable for Host Controllers
2647 if (host
->version
< SDHCI_SPEC_300
)
2650 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2652 switch (ios
->signal_voltage
) {
2653 case MMC_SIGNAL_VOLTAGE_330
:
2654 if (!(host
->flags
& SDHCI_SIGNALING_330
))
2656 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2657 ctrl
&= ~SDHCI_CTRL_VDD_180
;
2658 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2660 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
2661 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
2663 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2669 usleep_range(5000, 5500);
2671 /* 3.3V regulator output should be stable within 5 ms */
2672 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2673 if (!(ctrl
& SDHCI_CTRL_VDD_180
))
2676 pr_warn("%s: 3.3V regulator output did not become stable\n",
2680 case MMC_SIGNAL_VOLTAGE_180
:
2681 if (!(host
->flags
& SDHCI_SIGNALING_180
))
2683 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
2684 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
2686 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2693 * Enable 1.8V Signal Enable in the Host Control2
2696 ctrl
|= SDHCI_CTRL_VDD_180
;
2697 sdhci_writew(host
, ctrl
, SDHCI_HOST_CONTROL2
);
2699 /* Some controller need to do more when switching */
2700 if (host
->ops
->voltage_switch
)
2701 host
->ops
->voltage_switch(host
);
2703 /* 1.8V regulator output should be stable within 5 ms */
2704 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2705 if (ctrl
& SDHCI_CTRL_VDD_180
)
2708 pr_warn("%s: 1.8V regulator output did not become stable\n",
2712 case MMC_SIGNAL_VOLTAGE_120
:
2713 if (!(host
->flags
& SDHCI_SIGNALING_120
))
2715 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
2716 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
2718 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2725 /* No signal voltage switch required */
2729 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch
);
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
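/*
 * Tuning helpers. sdhci_start_tuning() sets Execute Tuning and narrows the
 * interrupt mask to Buffer Read Ready only; sdhci_end_tuning() restores the
 * normal mask; sdhci_reset_tuning()/sdhci_abort_tuning() clear the tuned
 * clock state when tuning fails or times out.
 */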
void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_start_tuning);
void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_end_tuning);
void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_reset_for(host, TUNING_ABORT);

	sdhci_end_tuning(host);

	mmc_send_abort_tuning(host->mmc, opcode);
}
EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2810 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2811 * tuning command does not have a data payload (or rather the hardware does it
2812 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2813 * interrupt setup is different to other commands and there is no timeout
2814 * interrupt so special handling is needed.
2816 void sdhci_send_tuning(struct sdhci_host
*host
, u32 opcode
)
2818 struct mmc_host
*mmc
= host
->mmc
;
2819 struct mmc_command cmd
= {};
2820 struct mmc_request mrq
= {};
2821 unsigned long flags
;
2822 u32 b
= host
->sdma_boundary
;
2824 spin_lock_irqsave(&host
->lock
, flags
);
2826 cmd
.opcode
= opcode
;
2827 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
2832 * In response to CMD19, the card sends 64 bytes of tuning
2833 * block to the Host Controller. So we set the block size
2836 if (cmd
.opcode
== MMC_SEND_TUNING_BLOCK_HS200
&&
2837 mmc
->ios
.bus_width
== MMC_BUS_WIDTH_8
)
2838 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(b
, 128), SDHCI_BLOCK_SIZE
);
2840 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(b
, 64), SDHCI_BLOCK_SIZE
);
2843 * The tuning block is sent by the card to the host controller.
2844 * So we set the TRNS_READ bit in the Transfer Mode register.
2845 * This also takes care of setting DMA Enable and Multi Block
2846 * Select in the same register to 0.
2848 sdhci_writew(host
, SDHCI_TRNS_READ
, SDHCI_TRANSFER_MODE
);
2850 if (!sdhci_send_command_retry(host
, &cmd
, flags
)) {
2851 spin_unlock_irqrestore(&host
->lock
, flags
);
2852 host
->tuning_done
= 0;
2858 sdhci_del_timer(host
, &mrq
);
2860 host
->tuning_done
= 0;
2862 spin_unlock_irqrestore(&host
->lock
, flags
);
2864 /* Wait for Buffer Read Ready interrupt */
2865 wait_event_timeout(host
->buf_ready_int
, (host
->tuning_done
== 1),
2866 msecs_to_jiffies(50));
2869 EXPORT_SYMBOL_GPL(sdhci_send_tuning
);
2871 int __sdhci_execute_tuning(struct sdhci_host
*host
, u32 opcode
)
2876 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2877 * of loops reaches tuning loop count.
2879 for (i
= 0; i
< host
->tuning_loop_count
; i
++) {
2882 sdhci_send_tuning(host
, opcode
);
2884 if (!host
->tuning_done
) {
2885 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2886 mmc_hostname(host
->mmc
));
2887 sdhci_abort_tuning(host
, opcode
);
2891 /* Spec does not require a delay between tuning cycles */
2892 if (host
->tuning_delay
> 0)
2893 mdelay(host
->tuning_delay
);
2895 ctrl
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
2896 if (!(ctrl
& SDHCI_CTRL_EXEC_TUNING
)) {
2897 if (ctrl
& SDHCI_CTRL_TUNED_CLK
)
2898 return 0; /* Success! */
2904 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2905 mmc_hostname(host
->mmc
));
2906 sdhci_reset_tuning(host
);
2909 EXPORT_SYMBOL_GPL(__sdhci_execute_tuning
);
2911 int sdhci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
)
2913 struct sdhci_host
*host
= mmc_priv(mmc
);
2915 unsigned int tuning_count
= 0;
2918 hs400_tuning
= host
->flags
& SDHCI_HS400_TUNING
;
2920 if (host
->tuning_mode
== SDHCI_TUNING_MODE_1
)
2921 tuning_count
= host
->tuning_count
;
2924 * The Host Controller needs tuning in case of SDR104 and DDR50
2925 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2926 * the Capabilities register.
2927 * If the Host Controller supports the HS200 mode then the
2928 * tuning function has to be executed.
2930 switch (host
->timing
) {
2931 /* HS400 tuning is done in HS200 mode */
2932 case MMC_TIMING_MMC_HS400
:
2936 case MMC_TIMING_MMC_HS200
:
2938 * Periodic re-tuning for HS400 is not expected to be needed, so
2945 case MMC_TIMING_UHS_SDR104
:
2946 case MMC_TIMING_UHS_DDR50
:
2949 case MMC_TIMING_UHS_SDR50
:
2950 if (host
->flags
& SDHCI_SDR50_NEEDS_TUNING
)
2958 if (host
->ops
->platform_execute_tuning
) {
2959 err
= host
->ops
->platform_execute_tuning(host
, opcode
);
2963 mmc
->retune_period
= tuning_count
;
2965 if (host
->tuning_delay
< 0)
2966 host
->tuning_delay
= opcode
== MMC_SEND_TUNING_BLOCK
;
2968 sdhci_start_tuning(host
);
2970 host
->tuning_err
= __sdhci_execute_tuning(host
, opcode
);
2972 sdhci_end_tuning(host
);
2974 host
->flags
&= ~SDHCI_HS400_TUNING
;
2978 EXPORT_SYMBOL_GPL(sdhci_execute_tuning
);
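/*
 * sdhci_enable_preset_value() - toggle Preset Value Enable in Host Control 2
 * (SDHCI v3.00+) and mirror the state in host->flags and
 * host->preset_enabled.
 */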
void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
EXPORT_SYMBOL_GPL(sdhci_enable_preset_value);
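/*
 * .pre_req/.post_req callbacks: pre-map and unmap the request's scatterlist
 * for DMA outside the command path, tracking the mapping state in
 * data->host_cookie.
 */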
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));

	data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	/*
	 * No pre-mapping in the pre hook if we're using the bounce buffer,
	 * for that we would need two bounce buffers since one buffer is
	 * in flight when this is getting called.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
3037 static void sdhci_error_out_mrqs(struct sdhci_host
*host
, int err
)
3039 if (host
->data_cmd
) {
3040 host
->data_cmd
->error
= err
;
3041 sdhci_finish_mrq(host
, host
->data_cmd
->mrq
);
3045 host
->cmd
->error
= err
;
3046 sdhci_finish_mrq(host
, host
->cmd
->mrq
);
3050 static void sdhci_card_event(struct mmc_host
*mmc
)
3052 struct sdhci_host
*host
= mmc_priv(mmc
);
3053 unsigned long flags
;
3056 /* First check if client has provided their own card event */
3057 if (host
->ops
->card_event
)
3058 host
->ops
->card_event(host
);
3060 present
= mmc
->ops
->get_cd(mmc
);
3062 spin_lock_irqsave(&host
->lock
, flags
);
3064 /* Check sdhci_has_requests() first in case we are runtime suspended */
3065 if (sdhci_has_requests(host
) && !present
) {
3066 pr_err("%s: Card removed during transfer!\n",
3068 pr_err("%s: Resetting controller.\n",
3071 sdhci_reset_for(host
, CARD_REMOVED
);
3073 sdhci_error_out_mrqs(host
, -ENOMEDIUM
);
3076 spin_unlock_irqrestore(&host
->lock
, flags
);
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.card_hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.ack_sdio_irq    = sdhci_ack_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
3096 /*****************************************************************************\
3100 \*****************************************************************************/
3102 void sdhci_request_done_dma(struct sdhci_host
*host
, struct mmc_request
*mrq
)
3104 struct mmc_data
*data
= mrq
->data
;
3106 if (data
&& data
->host_cookie
== COOKIE_MAPPED
) {
3107 if (host
->bounce_buffer
) {
3109 * On reads, copy the bounced data into the
3112 if (mmc_get_dma_dir(data
) == DMA_FROM_DEVICE
) {
3113 unsigned int length
= data
->bytes_xfered
;
3115 if (length
> host
->bounce_buffer_size
) {
3116 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3117 mmc_hostname(host
->mmc
),
3118 host
->bounce_buffer_size
,
3119 data
->bytes_xfered
);
3120 /* Cap it down and continue */
3121 length
= host
->bounce_buffer_size
;
3123 dma_sync_single_for_cpu(mmc_dev(host
->mmc
),
3125 host
->bounce_buffer_size
,
3127 sg_copy_from_buffer(data
->sg
,
3129 host
->bounce_buffer
,
3132 /* No copying, just switch ownership */
3133 dma_sync_single_for_cpu(mmc_dev(host
->mmc
),
3135 host
->bounce_buffer_size
,
3136 mmc_get_dma_dir(data
));
3139 /* Unmap the raw data */
3140 dma_unmap_sg(mmc_dev(host
->mmc
), data
->sg
,
3142 mmc_get_dma_dir(data
));
3144 data
->host_cookie
= COOKIE_UNMAPPED
;
3147 EXPORT_SYMBOL_GPL(sdhci_request_done_dma
);
3149 static bool sdhci_request_done(struct sdhci_host
*host
)
3151 unsigned long flags
;
3152 struct mmc_request
*mrq
;
3155 spin_lock_irqsave(&host
->lock
, flags
);
3157 for (i
= 0; i
< SDHCI_MAX_MRQS
; i
++) {
3158 mrq
= host
->mrqs_done
[i
];
3164 spin_unlock_irqrestore(&host
->lock
, flags
);
3169 * The controller needs a reset of internal state machines
3170 * upon error conditions.
3172 if (sdhci_needs_reset(host
, mrq
)) {
3174 * Do not finish until command and data lines are available for
3175 * reset. Note there can only be one other mrq, so it cannot
3176 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3177 * would both be null.
3179 if (host
->cmd
|| host
->data_cmd
) {
3180 spin_unlock_irqrestore(&host
->lock
, flags
);
3184 /* Some controllers need this kick or reset won't work here */
3185 if (host
->quirks
& SDHCI_QUIRK_CLOCK_BEFORE_RESET
)
3186 /* This is to force an update */
3187 host
->ops
->set_clock(host
, host
->clock
);
3189 sdhci_reset_for(host
, REQUEST_ERROR
);
3191 host
->pending_reset
= false;
3195 * Always unmap the data buffers if they were mapped by
3196 * sdhci_prepare_data() whenever we finish with a request.
3197 * This avoids leaking DMA mappings on error.
3199 if (host
->flags
& SDHCI_REQ_USE_DMA
) {
3200 struct mmc_data
*data
= mrq
->data
;
3202 if (host
->use_external_dma
&& data
&&
3203 (mrq
->cmd
->error
|| data
->error
)) {
3204 struct dma_chan
*chan
= sdhci_external_dma_channel(host
, data
);
3206 host
->mrqs_done
[i
] = NULL
;
3207 spin_unlock_irqrestore(&host
->lock
, flags
);
3208 dmaengine_terminate_sync(chan
);
3209 spin_lock_irqsave(&host
->lock
, flags
);
3210 sdhci_set_mrq_done(host
, mrq
);
3213 sdhci_request_done_dma(host
, mrq
);
3216 host
->mrqs_done
[i
] = NULL
;
3218 spin_unlock_irqrestore(&host
->lock
, flags
);
3220 if (host
->ops
->request_done
)
3221 host
->ops
->request_done(host
, mrq
);
3223 mmc_request_done(host
->mmc
, mrq
);
3228 void sdhci_complete_work(struct work_struct
*work
)
3230 struct sdhci_host
*host
= container_of(work
, struct sdhci_host
,
3233 while (!sdhci_request_done(host
))
3236 EXPORT_SYMBOL_GPL(sdhci_complete_work
);
3238 static void sdhci_timeout_timer(struct timer_list
*t
)
3240 struct sdhci_host
*host
;
3241 unsigned long flags
;
3243 host
= from_timer(host
, t
, timer
);
3245 spin_lock_irqsave(&host
->lock
, flags
);
3247 if (host
->cmd
&& !sdhci_data_line_cmd(host
->cmd
)) {
3248 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3249 mmc_hostname(host
->mmc
));
3250 sdhci_err_stats_inc(host
, REQ_TIMEOUT
);
3251 sdhci_dumpregs(host
);
3253 host
->cmd
->error
= -ETIMEDOUT
;
3254 sdhci_finish_mrq(host
, host
->cmd
->mrq
);
3257 spin_unlock_irqrestore(&host
->lock
, flags
);
3260 static void sdhci_timeout_data_timer(struct timer_list
*t
)
3262 struct sdhci_host
*host
;
3263 unsigned long flags
;
3265 host
= from_timer(host
, t
, data_timer
);
3267 spin_lock_irqsave(&host
->lock
, flags
);
3269 if (host
->data
|| host
->data_cmd
||
3270 (host
->cmd
&& sdhci_data_line_cmd(host
->cmd
))) {
3271 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3272 mmc_hostname(host
->mmc
));
3273 sdhci_err_stats_inc(host
, REQ_TIMEOUT
);
3274 sdhci_dumpregs(host
);
3277 host
->data
->error
= -ETIMEDOUT
;
3278 __sdhci_finish_data(host
, true);
3279 queue_work(host
->complete_wq
, &host
->complete_work
);
3280 } else if (host
->data_cmd
) {
3281 host
->data_cmd
->error
= -ETIMEDOUT
;
3282 sdhci_finish_mrq(host
, host
->data_cmd
->mrq
);
3284 host
->cmd
->error
= -ETIMEDOUT
;
3285 sdhci_finish_mrq(host
, host
->cmd
->mrq
);
3289 spin_unlock_irqrestore(&host
->lock
, flags
);
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
3298 static void sdhci_cmd_irq(struct sdhci_host
*host
, u32 intmask
, u32
*intmask_p
)
3300 /* Handle auto-CMD12 error */
3301 if (intmask
& SDHCI_INT_AUTO_CMD_ERR
&& host
->data_cmd
) {
3302 struct mmc_request
*mrq
= host
->data_cmd
->mrq
;
3303 u16 auto_cmd_status
= sdhci_readw(host
, SDHCI_AUTO_CMD_STATUS
);
3304 int data_err_bit
= (auto_cmd_status
& SDHCI_AUTO_CMD_TIMEOUT
) ?
3305 SDHCI_INT_DATA_TIMEOUT
:
3308 /* Treat auto-CMD12 error the same as data error */
3309 if (!mrq
->sbc
&& (host
->flags
& SDHCI_AUTO_CMD12
)) {
3310 *intmask_p
|= data_err_bit
;
3317 * SDHCI recovers from errors by resetting the cmd and data
3318 * circuits. Until that is done, there very well might be more
3319 * interrupts, so ignore them in that case.
3321 if (host
->pending_reset
)
3323 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3324 mmc_hostname(host
->mmc
), (unsigned)intmask
);
3325 sdhci_err_stats_inc(host
, UNEXPECTED_IRQ
);
3326 sdhci_dumpregs(host
);
3330 if (intmask
& (SDHCI_INT_TIMEOUT
| SDHCI_INT_CRC
|
3331 SDHCI_INT_END_BIT
| SDHCI_INT_INDEX
)) {
3332 if (intmask
& SDHCI_INT_TIMEOUT
) {
3333 host
->cmd
->error
= -ETIMEDOUT
;
3334 sdhci_err_stats_inc(host
, CMD_TIMEOUT
);
3336 host
->cmd
->error
= -EILSEQ
;
3337 if (!mmc_op_tuning(host
->cmd
->opcode
))
3338 sdhci_err_stats_inc(host
, CMD_CRC
);
3340 /* Treat data command CRC error the same as data CRC error */
3341 if (host
->cmd
->data
&&
3342 (intmask
& (SDHCI_INT_CRC
| SDHCI_INT_TIMEOUT
)) ==
3345 *intmask_p
|= SDHCI_INT_DATA_CRC
;
3349 __sdhci_finish_mrq(host
, host
->cmd
->mrq
);
3353 /* Handle auto-CMD23 error */
3354 if (intmask
& SDHCI_INT_AUTO_CMD_ERR
) {
3355 struct mmc_request
*mrq
= host
->cmd
->mrq
;
3356 u16 auto_cmd_status
= sdhci_readw(host
, SDHCI_AUTO_CMD_STATUS
);
3357 int err
= (auto_cmd_status
& SDHCI_AUTO_CMD_TIMEOUT
) ?
3361 sdhci_err_stats_inc(host
, AUTO_CMD
);
3363 if (sdhci_auto_cmd23(host
, mrq
)) {
3364 mrq
->sbc
->error
= err
;
3365 __sdhci_finish_mrq(host
, mrq
);
3370 if (intmask
& SDHCI_INT_RESPONSE
)
3371 sdhci_finish_command(host
);
3374 static void sdhci_adma_show_error(struct sdhci_host
*host
)
3376 void *desc
= host
->adma_table
;
3377 dma_addr_t dma
= host
->adma_addr
;
3379 sdhci_dumpregs(host
);
3382 struct sdhci_adma2_64_desc
*dma_desc
= desc
;
3384 if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
3385 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3386 (unsigned long long)dma
,
3387 le32_to_cpu(dma_desc
->addr_hi
),
3388 le32_to_cpu(dma_desc
->addr_lo
),
3389 le16_to_cpu(dma_desc
->len
),
3390 le16_to_cpu(dma_desc
->cmd
));
3392 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3393 (unsigned long long)dma
,
3394 le32_to_cpu(dma_desc
->addr_lo
),
3395 le16_to_cpu(dma_desc
->len
),
3396 le16_to_cpu(dma_desc
->cmd
));
3398 desc
+= host
->desc_sz
;
3399 dma
+= host
->desc_sz
;
3401 if (dma_desc
->cmd
& cpu_to_le16(ADMA2_END
))
3406 static void sdhci_data_irq(struct sdhci_host
*host
, u32 intmask
)
3409 * CMD19 generates _only_ Buffer Read Ready interrupt if
3410 * use sdhci_send_tuning.
3411 * Need to exclude this case: PIO mode and use mmc_send_tuning,
3412 * If not, sdhci_transfer_pio will never be called, make the
3413 * SDHCI_INT_DATA_AVAIL always there, stuck in irq storm.
3415 if (intmask
& SDHCI_INT_DATA_AVAIL
&& !host
->data
) {
3416 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
)))) {
3417 host
->tuning_done
= 1;
3418 wake_up(&host
->buf_ready_int
);
3424 struct mmc_command
*data_cmd
= host
->data_cmd
;
3427 * The "data complete" interrupt is also used to
3428 * indicate that a busy state has ended. See comment
3429 * above in sdhci_cmd_irq().
3431 if (data_cmd
&& (data_cmd
->flags
& MMC_RSP_BUSY
)) {
3432 if (intmask
& SDHCI_INT_DATA_TIMEOUT
) {
3433 host
->data_cmd
= NULL
;
3434 data_cmd
->error
= -ETIMEDOUT
;
3435 sdhci_err_stats_inc(host
, CMD_TIMEOUT
);
3436 __sdhci_finish_mrq(host
, data_cmd
->mrq
);
3439 if (intmask
& SDHCI_INT_DATA_END
) {
3440 host
->data_cmd
= NULL
;
3442 * Some cards handle busy-end interrupt
3443 * before the command completed, so make
3444 * sure we do things in the proper order.
3446 if (host
->cmd
== data_cmd
)
3449 __sdhci_finish_mrq(host
, data_cmd
->mrq
);
3455 * SDHCI recovers from errors by resetting the cmd and data
3456 * circuits. Until that is done, there very well might be more
3457 * interrupts, so ignore them in that case.
3459 if (host
->pending_reset
)
3462 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3463 mmc_hostname(host
->mmc
), (unsigned)intmask
);
3464 sdhci_err_stats_inc(host
, UNEXPECTED_IRQ
);
3465 sdhci_dumpregs(host
);
3470 if (intmask
& SDHCI_INT_DATA_TIMEOUT
) {
3471 host
->data
->error
= -ETIMEDOUT
;
3472 sdhci_err_stats_inc(host
, DAT_TIMEOUT
);
3473 } else if (intmask
& SDHCI_INT_DATA_END_BIT
) {
3474 host
->data
->error
= -EILSEQ
;
3475 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))))
3476 sdhci_err_stats_inc(host
, DAT_CRC
);
3477 } else if ((intmask
& (SDHCI_INT_DATA_CRC
| SDHCI_INT_TUNING_ERROR
)) &&
3478 SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))
3479 != MMC_BUS_TEST_R
) {
3480 host
->data
->error
= -EILSEQ
;
3481 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))))
3482 sdhci_err_stats_inc(host
, DAT_CRC
);
3483 if (intmask
& SDHCI_INT_TUNING_ERROR
) {
3484 u16 ctrl2
= sdhci_readw(host
, SDHCI_HOST_CONTROL2
);
3486 ctrl2
&= ~SDHCI_CTRL_TUNED_CLK
;
3487 sdhci_writew(host
, ctrl2
, SDHCI_HOST_CONTROL2
);
3489 } else if (intmask
& SDHCI_INT_ADMA_ERROR
) {
3490 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host
->mmc
),
3492 sdhci_adma_show_error(host
);
3493 sdhci_err_stats_inc(host
, ADMA
);
3494 host
->data
->error
= -EIO
;
3495 if (host
->ops
->adma_workaround
)
3496 host
->ops
->adma_workaround(host
, intmask
);
3499 if (host
->data
->error
)
3500 sdhci_finish_data(host
);
3502 if (intmask
& (SDHCI_INT_DATA_AVAIL
| SDHCI_INT_SPACE_AVAIL
))
3503 sdhci_transfer_pio(host
);
3506 * We currently don't do anything fancy with DMA
3507 * boundaries, but as we can't disable the feature
3508 * we need to at least restart the transfer.
3510 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3511 * should return a valid address to continue from, but as
3512 * some controllers are faulty, don't trust them.
3514 if (intmask
& SDHCI_INT_DMA_END
) {
3515 dma_addr_t dmastart
, dmanow
;
3517 dmastart
= sdhci_sdma_address(host
);
3518 dmanow
= dmastart
+ host
->data
->bytes_xfered
;
3520 * Force update to the next DMA block boundary.
3523 ~((dma_addr_t
)SDHCI_DEFAULT_BOUNDARY_SIZE
- 1)) +
3524 SDHCI_DEFAULT_BOUNDARY_SIZE
;
3525 host
->data
->bytes_xfered
= dmanow
- dmastart
;
3526 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3527 &dmastart
, host
->data
->bytes_xfered
, &dmanow
);
3528 sdhci_set_sdma_addr(host
, dmanow
);
3531 if (intmask
& SDHCI_INT_DATA_END
) {
3532 if (host
->cmd
== host
->data_cmd
) {
3534 * Data managed to finish before the
3535 * command completed. Make sure we do
3536 * things in the proper order.
3538 host
->data_early
= 1;
3540 sdhci_finish_data(host
);
3546 static inline bool sdhci_defer_done(struct sdhci_host
*host
,
3547 struct mmc_request
*mrq
)
3549 struct mmc_data
*data
= mrq
->data
;
3551 return host
->pending_reset
|| host
->always_defer_done
||
3552 ((host
->flags
& SDHCI_REQ_USE_DMA
) && data
&&
3553 data
->host_cookie
== COOKIE_MAPPED
);
3556 static irqreturn_t
sdhci_irq(int irq
, void *dev_id
)
3558 struct mmc_request
*mrqs_done
[SDHCI_MAX_MRQS
] = {0};
3559 irqreturn_t result
= IRQ_NONE
;
3560 struct sdhci_host
*host
= dev_id
;
3561 u32 intmask
, mask
, unexpected
= 0;
3565 spin_lock(&host
->lock
);
3567 if (host
->runtime_suspended
) {
3568 spin_unlock(&host
->lock
);
3572 intmask
= sdhci_readl(host
, SDHCI_INT_STATUS
);
3573 if (!intmask
|| intmask
== 0xffffffff) {
3579 DBG("IRQ status 0x%08x\n", intmask
);
3581 if (host
->ops
->irq
) {
3582 intmask
= host
->ops
->irq(host
, intmask
);
3587 /* Clear selected interrupts. */
3588 mask
= intmask
& (SDHCI_INT_CMD_MASK
| SDHCI_INT_DATA_MASK
|
3589 SDHCI_INT_BUS_POWER
);
3590 sdhci_writel(host
, mask
, SDHCI_INT_STATUS
);
3592 if (intmask
& (SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
)) {
3593 u32 present
= sdhci_readl(host
, SDHCI_PRESENT_STATE
) &
3597 * There is a observation on i.mx esdhc. INSERT
3598 * bit will be immediately set again when it gets
3599 * cleared, if a card is inserted. We have to mask
3600 * the irq to prevent interrupt storm which will
3601 * freeze the system. And the REMOVE gets the
3604 * More testing are needed here to ensure it works
3605 * for other platforms though.
3607 host
->ier
&= ~(SDHCI_INT_CARD_INSERT
|
3608 SDHCI_INT_CARD_REMOVE
);
3609 host
->ier
|= present
? SDHCI_INT_CARD_REMOVE
:
3610 SDHCI_INT_CARD_INSERT
;
3611 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
3612 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
3614 sdhci_writel(host
, intmask
& (SDHCI_INT_CARD_INSERT
|
3615 SDHCI_INT_CARD_REMOVE
), SDHCI_INT_STATUS
);
3617 host
->thread_isr
|= intmask
& (SDHCI_INT_CARD_INSERT
|
3618 SDHCI_INT_CARD_REMOVE
);
3619 result
= IRQ_WAKE_THREAD
;
3622 if (intmask
& SDHCI_INT_CMD_MASK
)
3623 sdhci_cmd_irq(host
, intmask
& SDHCI_INT_CMD_MASK
, &intmask
);
3625 if (intmask
& SDHCI_INT_DATA_MASK
)
3626 sdhci_data_irq(host
, intmask
& SDHCI_INT_DATA_MASK
);
3628 if (intmask
& SDHCI_INT_BUS_POWER
)
3629 pr_err("%s: Card is consuming too much power!\n",
3630 mmc_hostname(host
->mmc
));
3632 if (intmask
& SDHCI_INT_RETUNE
)
3633 mmc_retune_needed(host
->mmc
);
3635 if ((intmask
& SDHCI_INT_CARD_INT
) &&
3636 (host
->ier
& SDHCI_INT_CARD_INT
)) {
3637 sdhci_enable_sdio_irq_nolock(host
, false);
3638 sdio_signal_irq(host
->mmc
);
3641 intmask
&= ~(SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
|
3642 SDHCI_INT_CMD_MASK
| SDHCI_INT_DATA_MASK
|
3643 SDHCI_INT_ERROR
| SDHCI_INT_BUS_POWER
|
3644 SDHCI_INT_RETUNE
| SDHCI_INT_CARD_INT
);
3647 unexpected
|= intmask
;
3648 sdhci_writel(host
, intmask
, SDHCI_INT_STATUS
);
3651 if (result
== IRQ_NONE
)
3652 result
= IRQ_HANDLED
;
3654 intmask
= sdhci_readl(host
, SDHCI_INT_STATUS
);
3655 } while (intmask
&& --max_loops
);
3657 /* Determine if mrqs can be completed immediately */
3658 for (i
= 0; i
< SDHCI_MAX_MRQS
; i
++) {
3659 struct mmc_request
*mrq
= host
->mrqs_done
[i
];
3664 if (sdhci_defer_done(host
, mrq
)) {
3665 result
= IRQ_WAKE_THREAD
;
3668 host
->mrqs_done
[i
] = NULL
;
3672 if (host
->deferred_cmd
)
3673 result
= IRQ_WAKE_THREAD
;
3675 spin_unlock(&host
->lock
);
3677 /* Process mrqs ready for immediate completion */
3678 for (i
= 0; i
< SDHCI_MAX_MRQS
; i
++) {
3682 if (host
->ops
->request_done
)
3683 host
->ops
->request_done(host
, mrqs_done
[i
]);
3685 mmc_request_done(host
->mmc
, mrqs_done
[i
]);
3689 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3690 mmc_hostname(host
->mmc
), unexpected
);
3691 sdhci_err_stats_inc(host
, UNEXPECTED_IRQ
);
3692 sdhci_dumpregs(host
);
3698 irqreturn_t
sdhci_thread_irq(int irq
, void *dev_id
)
3700 struct sdhci_host
*host
= dev_id
;
3701 struct mmc_command
*cmd
;
3702 unsigned long flags
;
3705 while (!sdhci_request_done(host
))
3708 spin_lock_irqsave(&host
->lock
, flags
);
3710 isr
= host
->thread_isr
;
3711 host
->thread_isr
= 0;
3713 cmd
= host
->deferred_cmd
;
3714 if (cmd
&& !sdhci_send_command_retry(host
, cmd
, flags
))
3715 sdhci_finish_mrq(host
, cmd
->mrq
);
3717 spin_unlock_irqrestore(&host
->lock
, flags
);
3719 if (isr
& (SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
)) {
3720 struct mmc_host
*mmc
= host
->mmc
;
3722 mmc
->ops
->card_event(mmc
);
3723 mmc_detect_change(mmc
, msecs_to_jiffies(200));
3728 EXPORT_SYMBOL_GPL(sdhci_thread_irq
);
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM
3738 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host
*host
)
3740 return mmc_card_is_removable(host
->mmc
) &&
3741 !(host
->quirks
& SDHCI_QUIRK_BROKEN_CARD_DETECTION
) &&
3742 !mmc_can_gpio_cd(host
->mmc
);
3746 * To enable wakeup events, the corresponding events have to be enabled in
3747 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3748 * Table' in the SD Host Controller Standard Specification.
3749 * It is useless to restore SDHCI_INT_ENABLE state in
3750 * sdhci_disable_irq_wakeups() since it will be set by
3751 * sdhci_enable_card_detection() or sdhci_init().
3753 static bool sdhci_enable_irq_wakeups(struct sdhci_host
*host
)
3755 u8 mask
= SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
|
3761 if (sdhci_cd_irq_can_wakeup(host
)) {
3762 wake_val
|= SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
;
3763 irq_val
|= SDHCI_INT_CARD_INSERT
| SDHCI_INT_CARD_REMOVE
;
3766 if (mmc_card_wake_sdio_irq(host
->mmc
)) {
3767 wake_val
|= SDHCI_WAKE_ON_INT
;
3768 irq_val
|= SDHCI_INT_CARD_INT
;
3774 val
= sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
);
3777 sdhci_writeb(host
, val
, SDHCI_WAKE_UP_CONTROL
);
3779 sdhci_writel(host
, irq_val
, SDHCI_INT_ENABLE
);
3781 host
->irq_wake_enabled
= !enable_irq_wake(host
->irq
);
3783 return host
->irq_wake_enabled
;
3786 static void sdhci_disable_irq_wakeups(struct sdhci_host
*host
)
3789 u8 mask
= SDHCI_WAKE_ON_INSERT
| SDHCI_WAKE_ON_REMOVE
3790 | SDHCI_WAKE_ON_INT
;
3792 val
= sdhci_readb(host
, SDHCI_WAKE_UP_CONTROL
);
3794 sdhci_writeb(host
, val
, SDHCI_WAKE_UP_CONTROL
);
3796 disable_irq_wake(host
->irq
);
3798 host
->irq_wake_enabled
= false;
3801 int sdhci_suspend_host(struct sdhci_host
*host
)
3803 sdhci_disable_card_detection(host
);
3805 mmc_retune_timer_stop(host
->mmc
);
3807 if (!device_may_wakeup(mmc_dev(host
->mmc
)) ||
3808 !sdhci_enable_irq_wakeups(host
)) {
3810 sdhci_writel(host
, 0, SDHCI_INT_ENABLE
);
3811 sdhci_writel(host
, 0, SDHCI_SIGNAL_ENABLE
);
3812 free_irq(host
->irq
, host
);
3818 EXPORT_SYMBOL_GPL(sdhci_suspend_host
);
3820 int sdhci_resume_host(struct sdhci_host
*host
)
3822 struct mmc_host
*mmc
= host
->mmc
;
3825 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
3826 if (host
->ops
->enable_dma
)
3827 host
->ops
->enable_dma(host
);
3830 if ((mmc
->pm_flags
& MMC_PM_KEEP_POWER
) &&
3831 (host
->quirks2
& SDHCI_QUIRK2_HOST_OFF_CARD_ON
)) {
3832 /* Card keeps power but host controller does not */
3833 sdhci_init(host
, 0);
3836 host
->reinit_uhs
= true;
3837 mmc
->ops
->set_ios(mmc
, &mmc
->ios
);
3839 sdhci_init(host
, (mmc
->pm_flags
& MMC_PM_KEEP_POWER
));
3842 if (host
->irq_wake_enabled
) {
3843 sdhci_disable_irq_wakeups(host
);
3845 ret
= request_threaded_irq(host
->irq
, sdhci_irq
,
3846 sdhci_thread_irq
, IRQF_SHARED
,
3847 mmc_hostname(mmc
), host
);
3852 sdhci_enable_card_detection(host
);
3857 EXPORT_SYMBOL_GPL(sdhci_resume_host
);
3859 int sdhci_runtime_suspend_host(struct sdhci_host
*host
)
3861 unsigned long flags
;
3863 mmc_retune_timer_stop(host
->mmc
);
3865 spin_lock_irqsave(&host
->lock
, flags
);
3866 host
->ier
&= SDHCI_INT_CARD_INT
;
3867 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
3868 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
3869 spin_unlock_irqrestore(&host
->lock
, flags
);
3871 synchronize_hardirq(host
->irq
);
3873 spin_lock_irqsave(&host
->lock
, flags
);
3874 host
->runtime_suspended
= true;
3875 spin_unlock_irqrestore(&host
->lock
, flags
);
3879 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host
);
3881 int sdhci_runtime_resume_host(struct sdhci_host
*host
, int soft_reset
)
3883 struct mmc_host
*mmc
= host
->mmc
;
3884 unsigned long flags
;
3885 int host_flags
= host
->flags
;
3887 if (host_flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
3888 if (host
->ops
->enable_dma
)
3889 host
->ops
->enable_dma(host
);
3892 sdhci_init(host
, soft_reset
);
3894 if (mmc
->ios
.power_mode
!= MMC_POWER_UNDEFINED
&&
3895 mmc
->ios
.power_mode
!= MMC_POWER_OFF
) {
3896 /* Force clock and power re-program */
3899 host
->reinit_uhs
= true;
3900 mmc
->ops
->start_signal_voltage_switch(mmc
, &mmc
->ios
);
3901 mmc
->ops
->set_ios(mmc
, &mmc
->ios
);
3903 if ((host_flags
& SDHCI_PV_ENABLED
) &&
3904 !(host
->quirks2
& SDHCI_QUIRK2_PRESET_VALUE_BROKEN
)) {
3905 spin_lock_irqsave(&host
->lock
, flags
);
3906 sdhci_enable_preset_value(host
, true);
3907 spin_unlock_irqrestore(&host
->lock
, flags
);
3910 if ((mmc
->caps2
& MMC_CAP2_HS400_ES
) &&
3911 mmc
->ops
->hs400_enhanced_strobe
)
3912 mmc
->ops
->hs400_enhanced_strobe(mmc
, &mmc
->ios
);
3915 spin_lock_irqsave(&host
->lock
, flags
);
3917 host
->runtime_suspended
= false;
3919 /* Enable SDIO IRQ */
3920 if (sdio_irq_claimed(mmc
))
3921 sdhci_enable_sdio_irq_nolock(host
, true);
3923 /* Enable Card Detection */
3924 sdhci_enable_card_detection(host
);
3926 spin_unlock_irqrestore(&host
->lock
, flags
);
3930 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host
);
3932 #endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE) helpers                                        *
 *                                                                           *
\*****************************************************************************/
3940 void sdhci_cqe_enable(struct mmc_host
*mmc
)
3942 struct sdhci_host
*host
= mmc_priv(mmc
);
3943 unsigned long flags
;
3946 spin_lock_irqsave(&host
->lock
, flags
);
3948 ctrl
= sdhci_readb(host
, SDHCI_HOST_CONTROL
);
3949 ctrl
&= ~SDHCI_CTRL_DMA_MASK
;
3951 * Host from V4.10 supports ADMA3 DMA type.
3952 * ADMA3 performs integrated descriptor which is more suitable
3953 * for cmd queuing to fetch both command and transfer descriptors.
3955 if (host
->v4_mode
&& (host
->caps1
& SDHCI_CAN_DO_ADMA3
))
3956 ctrl
|= SDHCI_CTRL_ADMA3
;
3957 else if (host
->flags
& SDHCI_USE_64_BIT_DMA
)
3958 ctrl
|= SDHCI_CTRL_ADMA64
;
3960 ctrl
|= SDHCI_CTRL_ADMA32
;
3961 sdhci_writeb(host
, ctrl
, SDHCI_HOST_CONTROL
);
3963 sdhci_writew(host
, SDHCI_MAKE_BLKSZ(host
->sdma_boundary
, 512),
3966 /* Set maximum timeout */
3967 sdhci_set_timeout(host
, NULL
);
3969 host
->ier
= host
->cqe_ier
;
3971 sdhci_writel(host
, host
->ier
, SDHCI_INT_ENABLE
);
3972 sdhci_writel(host
, host
->ier
, SDHCI_SIGNAL_ENABLE
);
3974 host
->cqe_on
= true;
3976 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3977 mmc_hostname(mmc
), host
->ier
,
3978 sdhci_readl(host
, SDHCI_INT_STATUS
));
3980 spin_unlock_irqrestore(&host
->lock
, flags
);
3982 EXPORT_SYMBOL_GPL(sdhci_cqe_enable
);
3984 void sdhci_cqe_disable(struct mmc_host
*mmc
, bool recovery
)
3986 struct sdhci_host
*host
= mmc_priv(mmc
);
3987 unsigned long flags
;
3989 spin_lock_irqsave(&host
->lock
, flags
);
3991 sdhci_set_default_irqs(host
);
3993 host
->cqe_on
= false;
3996 sdhci_reset_for(host
, CQE_RECOVERY
);
3998 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3999 mmc_hostname(mmc
), host
->ier
,
4000 sdhci_readl(host
, SDHCI_INT_STATUS
));
4002 spin_unlock_irqrestore(&host
->lock
, flags
);
4004 EXPORT_SYMBOL_GPL(sdhci_cqe_disable
);
4006 bool sdhci_cqe_irq(struct sdhci_host
*host
, u32 intmask
, int *cmd_error
,
4014 if (intmask
& (SDHCI_INT_INDEX
| SDHCI_INT_END_BIT
| SDHCI_INT_CRC
)) {
4015 *cmd_error
= -EILSEQ
;
4016 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))))
4017 sdhci_err_stats_inc(host
, CMD_CRC
);
4018 } else if (intmask
& SDHCI_INT_TIMEOUT
) {
4019 *cmd_error
= -ETIMEDOUT
;
4020 sdhci_err_stats_inc(host
, CMD_TIMEOUT
);
4024 if (intmask
& (SDHCI_INT_DATA_END_BIT
| SDHCI_INT_DATA_CRC
| SDHCI_INT_TUNING_ERROR
)) {
4025 *data_error
= -EILSEQ
;
4026 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host
, SDHCI_COMMAND
))))
4027 sdhci_err_stats_inc(host
, DAT_CRC
);
4028 } else if (intmask
& SDHCI_INT_DATA_TIMEOUT
) {
4029 *data_error
= -ETIMEDOUT
;
4030 sdhci_err_stats_inc(host
, DAT_TIMEOUT
);
4031 } else if (intmask
& SDHCI_INT_ADMA_ERROR
) {
4033 sdhci_err_stats_inc(host
, ADMA
);
4037 /* Clear selected interrupts. */
4038 mask
= intmask
& host
->cqe_ier
;
4039 sdhci_writel(host
, mask
, SDHCI_INT_STATUS
);
4041 if (intmask
& SDHCI_INT_BUS_POWER
)
4042 pr_err("%s: Card is consuming too much power!\n",
4043 mmc_hostname(host
->mmc
));
4045 intmask
&= ~(host
->cqe_ier
| SDHCI_INT_ERROR
);
4047 sdhci_writel(host
, intmask
, SDHCI_INT_STATUS
);
4048 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
4049 mmc_hostname(host
->mmc
), intmask
);
4050 sdhci_err_stats_inc(host
, UNEXPECTED_IRQ
);
4051 sdhci_dumpregs(host
);
4056 EXPORT_SYMBOL_GPL(sdhci_cqe_irq
);
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
4064 struct sdhci_host
*sdhci_alloc_host(struct device
*dev
,
4067 struct mmc_host
*mmc
;
4068 struct sdhci_host
*host
;
4070 WARN_ON(dev
== NULL
);
4072 mmc
= mmc_alloc_host(sizeof(struct sdhci_host
) + priv_size
, dev
);
4074 return ERR_PTR(-ENOMEM
);
4076 host
= mmc_priv(mmc
);
4078 host
->mmc_host_ops
= sdhci_ops
;
4079 mmc
->ops
= &host
->mmc_host_ops
;
4081 host
->flags
= SDHCI_SIGNALING_330
;
4083 host
->cqe_ier
= SDHCI_CQE_INT_MASK
;
4084 host
->cqe_err_ier
= SDHCI_CQE_INT_ERR_MASK
;
4086 host
->tuning_delay
= -1;
4087 host
->tuning_loop_count
= MAX_TUNING_LOOP
;
4089 host
->sdma_boundary
= SDHCI_DEFAULT_BOUNDARY_ARG
;
4092 * The DMA table descriptor count is calculated as the maximum
4093 * number of segments times 2, to allow for an alignment
4094 * descriptor for each segment, plus 1 for a nop end descriptor.
4096 host
->adma_table_cnt
= SDHCI_MAX_SEGS
* 2 + 1;
4097 host
->max_adma
= 65536;
4099 host
->max_timeout_count
= 0xE;
4101 host
->complete_work_fn
= sdhci_complete_work
;
4102 host
->thread_irq_fn
= sdhci_thread_irq
;
4107 EXPORT_SYMBOL_GPL(sdhci_alloc_host
);
4109 static int sdhci_set_dma_mask(struct sdhci_host
*host
)
4111 struct mmc_host
*mmc
= host
->mmc
;
4112 struct device
*dev
= mmc_dev(mmc
);
4115 if (host
->quirks2
& SDHCI_QUIRK2_BROKEN_64_BIT_DMA
)
4116 host
->flags
&= ~SDHCI_USE_64_BIT_DMA
;
4118 /* Try 64-bit mask if hardware is capable of it */
4119 if (host
->flags
& SDHCI_USE_64_BIT_DMA
) {
4120 ret
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64));
4122 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4124 host
->flags
&= ~SDHCI_USE_64_BIT_DMA
;
4128 /* 32-bit mask as default & fallback */
4130 ret
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(32));
4132 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4139 void __sdhci_read_caps(struct sdhci_host
*host
, const u16
*ver
,
4140 const u32
*caps
, const u32
*caps1
)
4143 u64 dt_caps_mask
= 0;
4146 if (host
->read_caps
)
4149 host
->read_caps
= true;
4152 host
->quirks
= debug_quirks
;
4155 host
->quirks2
= debug_quirks2
;
4157 sdhci_reset_for_all(host
);
4160 sdhci_do_enable_v4_mode(host
);
4162 device_property_read_u64(mmc_dev(host
->mmc
),
4163 "sdhci-caps-mask", &dt_caps_mask
);
4164 device_property_read_u64(mmc_dev(host
->mmc
),
4165 "sdhci-caps", &dt_caps
);
4167 v
= ver
? *ver
: sdhci_readw(host
, SDHCI_HOST_VERSION
);
4168 host
->version
= (v
& SDHCI_SPEC_VER_MASK
) >> SDHCI_SPEC_VER_SHIFT
;
4173 host
->caps
= sdhci_readl(host
, SDHCI_CAPABILITIES
);
4174 host
->caps
&= ~lower_32_bits(dt_caps_mask
);
4175 host
->caps
|= lower_32_bits(dt_caps
);
4178 if (host
->version
< SDHCI_SPEC_300
)
4182 host
->caps1
= *caps1
;
4184 host
->caps1
= sdhci_readl(host
, SDHCI_CAPABILITIES_1
);
4185 host
->caps1
&= ~upper_32_bits(dt_caps_mask
);
4186 host
->caps1
|= upper_32_bits(dt_caps
);
4189 EXPORT_SYMBOL_GPL(__sdhci_read_caps
);
4191 static void sdhci_allocate_bounce_buffer(struct sdhci_host
*host
)
4193 struct mmc_host
*mmc
= host
->mmc
;
4194 unsigned int max_blocks
;
4195 unsigned int bounce_size
;
4199 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4200 * has diminishing returns, this is probably because SD/MMC
4201 * cards are usually optimized to handle this size of requests.
4203 bounce_size
= SZ_64K
;
4205 * Adjust downwards to maximum request size if this is less
4206 * than our segment size, else hammer down the maximum
4207 * request size to the maximum buffer size.
4209 if (mmc
->max_req_size
< bounce_size
)
4210 bounce_size
= mmc
->max_req_size
;
4211 max_blocks
= bounce_size
/ 512;
4214 * When we just support one segment, we can get significant
4215 * speedups by the help of a bounce buffer to group scattered
4216 * reads/writes together.
4218 host
->bounce_buffer
= devm_kmalloc(mmc_dev(mmc
),
4221 if (!host
->bounce_buffer
) {
4222 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4226 * Exiting with zero here makes sure we proceed with
4227 * mmc->max_segs == 1.
4232 host
->bounce_addr
= dma_map_single(mmc_dev(mmc
),
4233 host
->bounce_buffer
,
4236 ret
= dma_mapping_error(mmc_dev(mmc
), host
->bounce_addr
);
4238 devm_kfree(mmc_dev(mmc
), host
->bounce_buffer
);
4239 host
->bounce_buffer
= NULL
;
4240 /* Again fall back to max_segs == 1 */
4244 host
->bounce_buffer_size
= bounce_size
;
4246 /* Lie about this since we're bouncing */
4247 mmc
->max_segs
= max_blocks
;
4248 mmc
->max_seg_size
= bounce_size
;
4249 mmc
->max_req_size
= bounce_size
;
4251 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4252 mmc_hostname(mmc
), max_blocks
, bounce_size
);
4255 static inline bool sdhci_can_64bit_dma(struct sdhci_host
*host
)
4258 * According to SD Host Controller spec v4.10, bit[27] added from
4259 * version 4.10 in Capabilities Register is used as 64-bit System
4260 * Address support for V4 mode.
4262 if (host
->version
>= SDHCI_SPEC_410
&& host
->v4_mode
)
4263 return host
->caps
& SDHCI_CAN_64BIT_V4
;
4265 return host
->caps
& SDHCI_CAN_64BIT
;
4268 int sdhci_setup_host(struct sdhci_host
*host
)
4270 struct mmc_host
*mmc
;
4271 u32 max_current_caps
;
4272 unsigned int ocr_avail
;
4273 unsigned int override_timeout_clk
;
4276 bool enable_vqmmc
= false;
4278 WARN_ON(host
== NULL
);
4285 * If there are external regulators, get them. Note this must be done
4286 * early before resetting the host and reading the capabilities so that
4287 * the host can take the appropriate action if regulators are not
4290 if (!mmc
->supply
.vqmmc
) {
4291 ret
= mmc_regulator_get_supply(mmc
);
4294 enable_vqmmc
= true;
4297 DBG("Version: 0x%08x | Present: 0x%08x\n",
4298 sdhci_readw(host
, SDHCI_HOST_VERSION
),
4299 sdhci_readl(host
, SDHCI_PRESENT_STATE
));
4300 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4301 sdhci_readl(host
, SDHCI_CAPABILITIES
),
4302 sdhci_readl(host
, SDHCI_CAPABILITIES_1
));
4304 sdhci_read_caps(host
);
4306 override_timeout_clk
= host
->timeout_clk
;
4308 if (host
->version
> SDHCI_SPEC_420
) {
4309 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4310 mmc_hostname(mmc
), host
->version
);
4313 if (host
->quirks
& SDHCI_QUIRK_FORCE_DMA
)
4314 host
->flags
|= SDHCI_USE_SDMA
;
4315 else if (!(host
->caps
& SDHCI_CAN_DO_SDMA
))
4316 DBG("Controller doesn't have SDMA capability\n");
4318 host
->flags
|= SDHCI_USE_SDMA
;
4320 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_DMA
) &&
4321 (host
->flags
& SDHCI_USE_SDMA
)) {
4322 DBG("Disabling DMA as it is marked broken\n");
4323 host
->flags
&= ~SDHCI_USE_SDMA
;
4326 if ((host
->version
>= SDHCI_SPEC_200
) &&
4327 (host
->caps
& SDHCI_CAN_DO_ADMA2
))
4328 host
->flags
|= SDHCI_USE_ADMA
;
4330 if ((host
->quirks
& SDHCI_QUIRK_BROKEN_ADMA
) &&
4331 (host
->flags
& SDHCI_USE_ADMA
)) {
4332 DBG("Disabling ADMA as it is marked broken\n");
4333 host
->flags
&= ~SDHCI_USE_ADMA
;
4336 if (sdhci_can_64bit_dma(host
))
4337 host
->flags
|= SDHCI_USE_64_BIT_DMA
;
4339 if (host
->use_external_dma
) {
4340 ret
= sdhci_external_dma_init(host
);
4341 if (ret
== -EPROBE_DEFER
)
4344 * Fall back to use the DMA/PIO integrated in standard SDHCI
4345 * instead of external DMA devices.
4348 sdhci_switch_external_dma(host
, false);
4349 /* Disable internal DMA sources */
4351 host
->flags
&= ~(SDHCI_USE_SDMA
| SDHCI_USE_ADMA
);
4354 if (host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
)) {
4355 if (host
->ops
->set_dma_mask
)
4356 ret
= host
->ops
->set_dma_mask(host
);
4358 ret
= sdhci_set_dma_mask(host
);
4360 if (!ret
&& host
->ops
->enable_dma
)
4361 ret
= host
->ops
->enable_dma(host
);
4364 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4366 host
->flags
&= ~(SDHCI_USE_SDMA
| SDHCI_USE_ADMA
);
4372 /* SDMA does not support 64-bit DMA if v4 mode not set */
4373 if ((host
->flags
& SDHCI_USE_64_BIT_DMA
) && !host
->v4_mode
)
4374 host
->flags
&= ~SDHCI_USE_SDMA
;
4376 if (host
->flags
& SDHCI_USE_ADMA
) {
4380 if (!(host
->flags
& SDHCI_USE_64_BIT_DMA
))
4381 host
->alloc_desc_sz
= SDHCI_ADMA2_32_DESC_SZ
;
4382 else if (!host
->alloc_desc_sz
)
4383 host
->alloc_desc_sz
= SDHCI_ADMA2_64_DESC_SZ(host
);
4385 host
->desc_sz
= host
->alloc_desc_sz
;
4386 host
->adma_table_sz
= host
->adma_table_cnt
* host
->desc_sz
;
4388 host
->align_buffer_sz
= SDHCI_MAX_SEGS
* SDHCI_ADMA2_ALIGN
;
4390 * Use zalloc to zero the reserved high 32-bits of 128-bit
4391 * descriptors so that they never need to be written.
4393 buf
= dma_alloc_coherent(mmc_dev(mmc
),
4394 host
->align_buffer_sz
+ host
->adma_table_sz
,
4397 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4399 host
->flags
&= ~SDHCI_USE_ADMA
;
4400 } else if ((dma
+ host
->align_buffer_sz
) &
4401 (SDHCI_ADMA2_DESC_ALIGN
- 1)) {
4402 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4404 host
->flags
&= ~SDHCI_USE_ADMA
;
4405 dma_free_coherent(mmc_dev(mmc
), host
->align_buffer_sz
+
4406 host
->adma_table_sz
, buf
, dma
);
4408 host
->align_buffer
= buf
;
4409 host
->align_addr
= dma
;
4411 host
->adma_table
= buf
+ host
->align_buffer_sz
;
4412 host
->adma_addr
= dma
+ host
->align_buffer_sz
;
4417 * If we use DMA, then it's up to the caller to set the DMA
4418 * mask, but PIO does not need the hw shim so we set a new
4419 * mask here in that case.
4421 if (!(host
->flags
& (SDHCI_USE_SDMA
| SDHCI_USE_ADMA
))) {
4422 host
->dma_mask
= DMA_BIT_MASK(64);
4423 mmc_dev(mmc
)->dma_mask
= &host
->dma_mask
;
4426 if (host
->version
>= SDHCI_SPEC_300
)
4427 host
->max_clk
= FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK
, host
->caps
);
4429 host
->max_clk
= FIELD_GET(SDHCI_CLOCK_BASE_MASK
, host
->caps
);
4431 host
->max_clk
*= 1000000;
4432 if (host
->max_clk
== 0 || host
->quirks
&
4433 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN
) {
4434 if (!host
->ops
->get_max_clock
) {
4435 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4440 host
->max_clk
= host
->ops
->get_max_clock(host
);
4444 * In case of Host Controller v3.00, find out whether clock
4445 * multiplier is supported.
4447 host
->clk_mul
= FIELD_GET(SDHCI_CLOCK_MUL_MASK
, host
->caps1
);
4450 * In case the value in Clock Multiplier is 0, then programmable
4451 * clock mode is not supported, otherwise the actual clock
4452 * multiplier is one more than the value of Clock Multiplier
4453 * in the Capabilities Register.
4459 * Set host parameters.
4461 max_clk
= host
->max_clk
;
4463 if (host
->ops
->get_min_clock
)
4464 mmc
->f_min
= host
->ops
->get_min_clock(host
);
4465 else if (host
->version
>= SDHCI_SPEC_300
) {
4467 max_clk
= host
->max_clk
* host
->clk_mul
;
4469 * Divided Clock Mode minimum clock rate is always less than
4470 * Programmable Clock Mode minimum clock rate.
4472 mmc
->f_min
= host
->max_clk
/ SDHCI_MAX_DIV_SPEC_300
;
4474 mmc
->f_min
= host
->max_clk
/ SDHCI_MAX_DIV_SPEC_200
;
4476 if (!mmc
->f_max
|| mmc
->f_max
> max_clk
)
4477 mmc
->f_max
= max_clk
;
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;
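	/*
	 * Worked example (illustrative): timeout_clk is kept in kHz, so the
	 * division above yields milliseconds; with a 1 MHz timeout clock and
	 * the default maximum count of 2^27 the busy timeout limit is about
	 * 134 seconds.
	 */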
	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/*
	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;
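	/*
	 * Illustrative sketch (not part of this file): a platform driver
	 * whose board wires all eight data lines would do something like
	 *
	 *	host->mmc->caps |= MMC_CAP_8_BIT_DATA;
	 *	ret = sdhci_add_host(host);
	 *
	 * in its probe routine, so the capability is in place before the
	 * host is registered.
	 */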
	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		host->sdhci_core_to_disable_vqmmc = !ret;

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8v then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8v connected
		 * to the IO lines. (Applicable for other modes in 1.8v)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 supports also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
				       host->caps1);

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
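	/*
	 * Worked example (illustrative): a raw timer-count field of 4 gives
	 * 2 ^ (4 - 1) = 8, i.e. re-tuning roughly every 8 seconds when a
	 * timer-based re-tuning mode is in use.
	 */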
	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
		}
	}
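	/*
	 * Worked example (illustrative): a regulator limit of 1200000 uA is
	 * first reduced to 1200 mA, then to 300 in the register's 4 mA units
	 * (SDHCI_MAX_CURRENT_MULTIPLIER), and finally clamped to
	 * SDHCI_MAX_CURRENT_LIMIT before being replicated into the 3.3V,
	 * 3.0V and 1.8V fields.
	 */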
	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
						 max_current_caps) *
						SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
						 max_current_caps) *
						SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
						 max_current_caps) *
						SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}
	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);
	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
					  dma_max_mapping_size(mmc_dev(mmc)));
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
			host->max_adma = 65532; /* 32-bit alignment */
			mmc->max_seg_size = 65535;
			/*
			 * sdhci_adma_table_pre() expects to define 1 DMA
			 * descriptor per segment, so the maximum segment size
			 * is set accordingly. SDHCI allows up to 64KiB per DMA
			 * descriptor (16-bit field), but some controllers do
			 * not support "zero means 65536" reducing the maximum
			 * for them to 65535. That is a problem if PAGE_SIZE is
			 * 64KiB because the block layer does not support
			 * max_seg_size < PAGE_SIZE, however
			 * sdhci_adma_table_pre() has a workaround to handle
			 * that case, and split the descriptor. Refer also
			 * comment in sdhci_adma_table_pre().
			 */
			if (mmc->max_seg_size < PAGE_SIZE)
				mmc->max_seg_size = PAGE_SIZE;
		} else {
			mmc->max_seg_size = 65536;
		}
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
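	/*
	 * Worked example (illustrative): capability encodings of 0, 1 and 2
	 * map to 512, 1024 and 2048 byte blocks respectively; 3 is reserved,
	 * which is why values >= 3 fall back to 512 bytes above.
	 */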
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;
unreg:
	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
		mmc->caps2 &= ~MMC_CAP2_CQE;
		mmc->cqe_ops = NULL;
	}

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, host->complete_work_fn);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_reset_for_all(host);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
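/*
 * Illustrative sketch (not part of this file): glue drivers that need to
 * tweak capabilities after they have been parsed but before registration
 * call the two halves separately, roughly:
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *	... adjust host->mmc->caps, DMA settings, etc. ...
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 */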
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during "
				" transfer!\n", mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_reset_for_all(host);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (host->sdhci_core_to_disable_vqmmc)
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");