1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * Thanks to the following companies for their support:
9 * - JMicron (hardware and technical support)
12 #include <linux/delay.h>
13 #include <linux/dmaengine.h>
14 #include <linux/ktime.h>
15 #include <linux/highmem.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/slab.h>
20 #include <linux/scatterlist.h>
21 #include <linux/sizes.h>
22 #include <linux/swiotlb.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
27 #include <linux/leds.h>
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
35 #include "sdhci.h"
37 #define DRIVER_NAME "sdhci"
39 #define DBG(f, x...) \
40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
42 #define SDHCI_DUMP(f, x...) \
43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45 #define MAX_TUNING_LOOP 40
47 static unsigned int debug_quirks = 0;
48 static unsigned int debug_quirks2;
50 static void sdhci_finish_data(struct sdhci_host *);
52 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
54 void sdhci_dumpregs(struct sdhci_host *host)
56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
59 sdhci_readl(host, SDHCI_DMA_ADDRESS),
60 sdhci_readw(host, SDHCI_HOST_VERSION));
61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
62 sdhci_readw(host, SDHCI_BLOCK_SIZE),
63 sdhci_readw(host, SDHCI_BLOCK_COUNT));
64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
65 sdhci_readl(host, SDHCI_ARGUMENT),
66 sdhci_readw(host, SDHCI_TRANSFER_MODE));
67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
68 sdhci_readl(host, SDHCI_PRESENT_STATE),
69 sdhci_readb(host, SDHCI_HOST_CONTROL));
70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
71 sdhci_readb(host, SDHCI_POWER_CONTROL),
72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
75 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
78 sdhci_readl(host, SDHCI_INT_STATUS));
79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
80 sdhci_readl(host, SDHCI_INT_ENABLE),
81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
86 sdhci_readl(host, SDHCI_CAPABILITIES),
87 sdhci_readl(host, SDHCI_CAPABILITIES_1));
88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
89 sdhci_readw(host, SDHCI_COMMAND),
90 sdhci_readl(host, SDHCI_MAX_CURRENT));
91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
92 sdhci_readl(host, SDHCI_RESPONSE),
93 sdhci_readl(host, SDHCI_RESPONSE + 4));
94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
95 sdhci_readl(host, SDHCI_RESPONSE + 8),
96 sdhci_readl(host, SDHCI_RESPONSE + 12));
97 SDHCI_DUMP("Host ctl2: 0x%08x\n",
98 sdhci_readw(host, SDHCI_HOST_CONTROL2));
100 if (host->flags & SDHCI_USE_ADMA) {
101 if (host->flags & SDHCI_USE_64_BIT_DMA) {
102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
103 sdhci_readl(host, SDHCI_ADMA_ERROR),
104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
105 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
106 } else {
107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
108 sdhci_readl(host, SDHCI_ADMA_ERROR),
109 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
113 SDHCI_DUMP("============================================\n");
115 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
117 /*****************************************************************************\
119 * Low level functions *
121 \*****************************************************************************/
123 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
125 u16 ctrl2;
127 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
128 if (ctrl2 & SDHCI_CTRL_V4_MODE)
129 return;
131 ctrl2 |= SDHCI_CTRL_V4_MODE;
132 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136 * This can be called before sdhci_add_host() by a vendor's host controller
137 * driver to enable v4 mode if supported.
139 void sdhci_enable_v4_mode(struct sdhci_host *host)
141 host->v4_mode = true;
142 sdhci_do_enable_v4_mode(host);
144 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
146 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
148 return cmd->data || cmd->flags & MMC_RSP_BUSY;
151 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
153 u32 present;
155 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
156 !mmc_card_is_removable(host->mmc))
157 return;
159 if (enable) {
160 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
161 SDHCI_CARD_PRESENT;
163 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
164 SDHCI_INT_CARD_INSERT;
165 } else {
166 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
169 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
170 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
173 static void sdhci_enable_card_detection(struct sdhci_host *host)
175 sdhci_set_card_detection(host, true);
178 static void sdhci_disable_card_detection(struct sdhci_host *host)
180 sdhci_set_card_detection(host, false);
183 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
185 if (host->bus_on)
186 return;
187 host->bus_on = true;
188 pm_runtime_get_noresume(host->mmc->parent);
191 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
193 if (!host->bus_on)
194 return;
195 host->bus_on = false;
196 pm_runtime_put_noidle(host->mmc->parent);
199 void sdhci_reset(struct sdhci_host *host, u8 mask)
201 ktime_t timeout;
203 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
205 if (mask & SDHCI_RESET_ALL) {
206 host->clock = 0;
207 /* Reset-all turns off SD Bus Power */
208 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
209 sdhci_runtime_pm_bus_off(host);
212 /* Wait max 100 ms */
213 timeout = ktime_add_ms(ktime_get(), 100);
215 /* hw clears the bit when it's done */
216 while (1) {
217 bool timedout = ktime_after(ktime_get(), timeout);
219 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
220 break;
221 if (timedout) {
222 pr_err("%s: Reset 0x%x never completed.\n",
223 mmc_hostname(host->mmc), (int)mask);
224 sdhci_dumpregs(host);
225 return;
227 udelay(10);
230 EXPORT_SYMBOL_GPL(sdhci_reset);
232 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
234 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
235 struct mmc_host *mmc = host->mmc;
237 if (!mmc->ops->get_cd(mmc))
238 return;
241 host->ops->reset(host, mask);
243 if (mask & SDHCI_RESET_ALL) {
244 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
245 if (host->ops->enable_dma)
246 host->ops->enable_dma(host);
249 /* Resetting the controller clears many settings */
250 host->preset_enabled = false;
254 static void sdhci_set_default_irqs(struct sdhci_host *host)
256 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
257 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
258 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
259 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
260 SDHCI_INT_RESPONSE;
262 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
263 host->tuning_mode == SDHCI_TUNING_MODE_3)
264 host->ier |= SDHCI_INT_RETUNE;
266 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
267 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
270 static void sdhci_config_dma(struct sdhci_host *host)
272 u8 ctrl;
273 u16 ctrl2;
275 if (host->version < SDHCI_SPEC_200)
276 return;
278 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
281 * Always adjust the DMA selection as some controllers
282 * (e.g. JMicron) can't do PIO properly when the selection
283 * is ADMA.
285 ctrl &= ~SDHCI_CTRL_DMA_MASK;
286 if (!(host->flags & SDHCI_REQ_USE_DMA))
287 goto out;
289 /* Note if DMA Select is zero then SDMA is selected */
290 if (host->flags & SDHCI_USE_ADMA)
291 ctrl |= SDHCI_CTRL_ADMA32;
293 if (host->flags & SDHCI_USE_64_BIT_DMA) {
295 * If v4 mode, all supported DMA can be 64-bit addressing if
296 * controller supports 64-bit system address, otherwise only
297 * ADMA can support 64-bit addressing.
299 if (host->v4_mode) {
300 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
301 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
302 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
303 } else if (host->flags & SDHCI_USE_ADMA) {
305 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
306 * set SDHCI_CTRL_ADMA64.
308 ctrl |= SDHCI_CTRL_ADMA64;
312 out:
313 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
316 static void sdhci_init(struct sdhci_host *host, int soft)
318 struct mmc_host *mmc = host->mmc;
320 if (soft)
321 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
322 else
323 sdhci_do_reset(host, SDHCI_RESET_ALL);
325 if (host->v4_mode)
326 sdhci_do_enable_v4_mode(host);
328 sdhci_set_default_irqs(host);
330 host->cqe_on = false;
332 if (soft) {
333 /* force clock reconfiguration */
334 host->clock = 0;
335 mmc->ops->set_ios(mmc, &mmc->ios);
339 static void sdhci_reinit(struct sdhci_host *host)
341 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
343 sdhci_init(host, 0);
344 sdhci_enable_card_detection(host);
347 * A change to the card detect bits indicates a change in present state,
348 * refer sdhci_set_card_detection(). A card detect interrupt might have
349 * been missed while the host controller was being reset, so trigger a
350 * rescan to check.
352 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
353 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
356 static void __sdhci_led_activate(struct sdhci_host *host)
358 u8 ctrl;
360 if (host->quirks & SDHCI_QUIRK_NO_LED)
361 return;
363 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
364 ctrl |= SDHCI_CTRL_LED;
365 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
368 static void __sdhci_led_deactivate(struct sdhci_host *host)
370 u8 ctrl;
372 if (host->quirks & SDHCI_QUIRK_NO_LED)
373 return;
375 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
376 ctrl &= ~SDHCI_CTRL_LED;
377 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
380 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
381 static void sdhci_led_control(struct led_classdev *led,
382 enum led_brightness brightness)
384 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
385 unsigned long flags;
387 spin_lock_irqsave(&host->lock, flags);
389 if (host->runtime_suspended)
390 goto out;
392 if (brightness == LED_OFF)
393 __sdhci_led_deactivate(host);
394 else
395 __sdhci_led_activate(host);
396 out:
397 spin_unlock_irqrestore(&host->lock, flags);
400 static int sdhci_led_register(struct sdhci_host *host)
402 struct mmc_host *mmc = host->mmc;
404 if (host->quirks & SDHCI_QUIRK_NO_LED)
405 return 0;
407 snprintf(host->led_name, sizeof(host->led_name),
408 "%s::", mmc_hostname(mmc));
410 host->led.name = host->led_name;
411 host->led.brightness = LED_OFF;
412 host->led.default_trigger = mmc_hostname(mmc);
413 host->led.brightness_set = sdhci_led_control;
415 return led_classdev_register(mmc_dev(mmc), &host->led);
418 static void sdhci_led_unregister(struct sdhci_host *host)
420 if (host->quirks & SDHCI_QUIRK_NO_LED)
421 return;
423 led_classdev_unregister(&host->led);
426 static inline void sdhci_led_activate(struct sdhci_host *host)
430 static inline void sdhci_led_deactivate(struct sdhci_host *host)
434 #else
436 static inline int sdhci_led_register(struct sdhci_host *host)
438 return 0;
441 static inline void sdhci_led_unregister(struct sdhci_host *host)
445 static inline void sdhci_led_activate(struct sdhci_host *host)
447 __sdhci_led_activate(host);
450 static inline void sdhci_led_deactivate(struct sdhci_host *host)
452 __sdhci_led_deactivate(host);
455 #endif
457 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
458 unsigned long timeout)
460 if (sdhci_data_line_cmd(mrq->cmd))
461 mod_timer(&host->data_timer, timeout);
462 else
463 mod_timer(&host->timer, timeout);
466 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
468 if (sdhci_data_line_cmd(mrq->cmd))
469 del_timer(&host->data_timer);
470 else
471 del_timer(&host->timer);
474 static inline bool sdhci_has_requests(struct sdhci_host *host)
476 return host->cmd || host->data_cmd;
479 /*****************************************************************************\
481 * Core functions *
483 \*****************************************************************************/
485 static void sdhci_read_block_pio(struct sdhci_host *host)
487 unsigned long flags;
488 size_t blksize, len, chunk;
489 u32 uninitialized_var(scratch);
490 u8 *buf;
492 DBG("PIO reading\n");
494 blksize = host->data->blksz;
495 chunk = 0;
497 local_irq_save(flags);
499 while (blksize) {
500 BUG_ON(!sg_miter_next(&host->sg_miter));
502 len = min(host->sg_miter.length, blksize);
504 blksize -= len;
505 host->sg_miter.consumed = len;
507 buf = host->sg_miter.addr;
509 while (len) {
510 if (chunk == 0) {
511 scratch = sdhci_readl(host, SDHCI_BUFFER);
512 chunk = 4;
515 *buf = scratch & 0xFF;
517 buf++;
518 scratch >>= 8;
519 chunk--;
520 len--;
524 sg_miter_stop(&host->sg_miter);
526 local_irq_restore(flags);
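/*
 * Illustrative note (hypothetical value): sdhci_read_block_pio() above
 * unpacks each 32-bit word read from SDHCI_BUFFER least-significant byte
 * first, so a FIFO word of 0xDDCCBBAA would be stored to the buffer as the
 * byte sequence AA, BB, CC, DD.
 */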
529 static void sdhci_write_block_pio(struct sdhci_host *host)
531 unsigned long flags;
532 size_t blksize, len, chunk;
533 u32 scratch;
534 u8 *buf;
536 DBG("PIO writing\n");
538 blksize = host->data->blksz;
539 chunk = 0;
540 scratch = 0;
542 local_irq_save(flags);
544 while (blksize) {
545 BUG_ON(!sg_miter_next(&host->sg_miter));
547 len = min(host->sg_miter.length, blksize);
549 blksize -= len;
550 host->sg_miter.consumed = len;
552 buf = host->sg_miter.addr;
554 while (len) {
555 scratch |= (u32)*buf << (chunk * 8);
557 buf++;
558 chunk++;
559 len--;
561 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
562 sdhci_writel(host, scratch, SDHCI_BUFFER);
563 chunk = 0;
564 scratch = 0;
569 sg_miter_stop(&host->sg_miter);
571 local_irq_restore(flags);
574 static void sdhci_transfer_pio(struct sdhci_host *host)
576 u32 mask;
578 if (host->blocks == 0)
579 return;
581 if (host->data->flags & MMC_DATA_READ)
582 mask = SDHCI_DATA_AVAILABLE;
583 else
584 mask = SDHCI_SPACE_AVAILABLE;
587 * Some controllers (JMicron JMB38x) mess up the buffer bits
588 * for transfers < 4 bytes. As long as it is just one block,
589 * we can ignore the bits.
591 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
592 (host->data->blocks == 1))
593 mask = ~0;
595 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
596 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
597 udelay(100);
599 if (host->data->flags & MMC_DATA_READ)
600 sdhci_read_block_pio(host);
601 else
602 sdhci_write_block_pio(host);
604 host->blocks--;
605 if (host->blocks == 0)
606 break;
609 DBG("PIO transfer complete.\n");
612 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
613 struct mmc_data *data, int cookie)
615 int sg_count;
618 * If the data buffers are already mapped, return the previous
619 * dma_map_sg() result.
621 if (data->host_cookie == COOKIE_PRE_MAPPED)
622 return data->sg_count;
624 /* Bounce write requests to the bounce buffer */
625 if (host->bounce_buffer) {
626 unsigned int length = data->blksz * data->blocks;
628 if (length > host->bounce_buffer_size) {
629 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
630 mmc_hostname(host->mmc), length,
631 host->bounce_buffer_size);
632 return -EIO;
634 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
635 /* Copy the data to the bounce buffer */
636 sg_copy_to_buffer(data->sg, data->sg_len,
637 host->bounce_buffer,
638 length);
640 /* Switch ownership to the DMA */
641 dma_sync_single_for_device(host->mmc->parent,
642 host->bounce_addr,
643 host->bounce_buffer_size,
644 mmc_get_dma_dir(data));
645 /* Just a dummy value */
646 sg_count = 1;
647 } else {
648 /* Just access the data directly from memory */
649 sg_count = dma_map_sg(mmc_dev(host->mmc),
650 data->sg, data->sg_len,
651 mmc_get_dma_dir(data));
654 if (sg_count == 0)
655 return -ENOSPC;
657 data->sg_count = sg_count;
658 data->host_cookie = cookie;
660 return sg_count;
663 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
665 local_irq_save(*flags);
666 return kmap_atomic(sg_page(sg)) + sg->offset;
669 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
671 kunmap_atomic(buffer);
672 local_irq_restore(*flags);
675 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
676 dma_addr_t addr, int len, unsigned int cmd)
678 struct sdhci_adma2_64_desc *dma_desc = *desc;
680 /* 32-bit and 64-bit descriptors have these members in same position */
681 dma_desc->cmd = cpu_to_le16(cmd);
682 dma_desc->len = cpu_to_le16(len);
683 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
685 if (host->flags & SDHCI_USE_64_BIT_DMA)
686 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
688 *desc += host->desc_sz;
690 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
692 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
693 void **desc, dma_addr_t addr,
694 int len, unsigned int cmd)
696 if (host->ops->adma_write_desc)
697 host->ops->adma_write_desc(host, desc, addr, len, cmd);
698 else
699 sdhci_adma_write_desc(host, desc, addr, len, cmd);
702 static void sdhci_adma_mark_end(void *desc)
704 struct sdhci_adma2_64_desc *dma_desc = desc;
706 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
707 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
710 static void sdhci_adma_table_pre(struct sdhci_host *host,
711 struct mmc_data *data, int sg_count)
713 struct scatterlist *sg;
714 unsigned long flags;
715 dma_addr_t addr, align_addr;
716 void *desc, *align;
717 char *buffer;
718 int len, offset, i;
721 * The spec does not specify endianness of descriptor table.
722 * We currently guess that it is LE.
725 host->sg_count = sg_count;
727 desc = host->adma_table;
728 align = host->align_buffer;
730 align_addr = host->align_addr;
732 for_each_sg(data->sg, sg, host->sg_count, i) {
733 addr = sg_dma_address(sg);
734 len = sg_dma_len(sg);
737 * The SDHCI specification states that ADMA addresses must
738 * be 32-bit aligned. If they aren't, then we use a bounce
739 * buffer for the (up to three) bytes that screw up the
740 * alignment.
742 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
743 SDHCI_ADMA2_MASK;
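/*
 * Illustrative example (hypothetical address): if sg_dma_address() were
 * 0x...1002, then (addr & SDHCI_ADMA2_MASK) == 2 and offset == 2, so the
 * first two bytes are routed through the align bounce buffer below and
 * the rest of the segment is described starting at addr + 2.
 */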
744 if (offset) {
745 if (data->flags & MMC_DATA_WRITE) {
746 buffer = sdhci_kmap_atomic(sg, &flags);
747 memcpy(align, buffer, offset);
748 sdhci_kunmap_atomic(buffer, &flags);
751 /* tran, valid */
752 __sdhci_adma_write_desc(host, &desc, align_addr,
753 offset, ADMA2_TRAN_VALID);
755 BUG_ON(offset > 65536);
757 align += SDHCI_ADMA2_ALIGN;
758 align_addr += SDHCI_ADMA2_ALIGN;
760 addr += offset;
761 len -= offset;
764 BUG_ON(len > 65536);
766 /* tran, valid */
767 if (len)
768 __sdhci_adma_write_desc(host, &desc, addr, len,
769 ADMA2_TRAN_VALID);
772 * If this triggers then we have a calculation bug
773 * somewhere. :/
775 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
778 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
779 /* Mark the last descriptor as the terminating descriptor */
780 if (desc != host->adma_table) {
781 desc -= host->desc_sz;
782 sdhci_adma_mark_end(desc);
784 } else {
785 /* Add a terminating entry - nop, end, valid */
786 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
790 static void sdhci_adma_table_post(struct sdhci_host *host,
791 struct mmc_data *data)
793 struct scatterlist *sg;
794 int i, size;
795 void *align;
796 char *buffer;
797 unsigned long flags;
799 if (data->flags & MMC_DATA_READ) {
800 bool has_unaligned = false;
802 /* Do a quick scan of the SG list for any unaligned mappings */
803 for_each_sg(data->sg, sg, host->sg_count, i)
804 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
805 has_unaligned = true;
806 break;
809 if (has_unaligned) {
810 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
811 data->sg_len, DMA_FROM_DEVICE);
813 align = host->align_buffer;
815 for_each_sg(data->sg, sg, host->sg_count, i) {
816 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
817 size = SDHCI_ADMA2_ALIGN -
818 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
820 buffer = sdhci_kmap_atomic(sg, &flags);
821 memcpy(buffer, align, size);
822 sdhci_kunmap_atomic(buffer, &flags);
824 align += SDHCI_ADMA2_ALIGN;
831 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
833 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
834 if (host->flags & SDHCI_USE_64_BIT_DMA)
835 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
838 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
840 if (host->bounce_buffer)
841 return host->bounce_addr;
842 else
843 return sg_dma_address(host->data->sg);
846 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
848 if (host->v4_mode)
849 sdhci_set_adma_addr(host, addr);
850 else
851 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
854 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
855 struct mmc_command *cmd,
856 struct mmc_data *data)
858 unsigned int target_timeout;
860 /* timeout in us */
861 if (!data) {
862 target_timeout = cmd->busy_timeout * 1000;
863 } else {
864 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
865 if (host->clock && data->timeout_clks) {
866 unsigned long long val;
869 * data->timeout_clks is in units of clock cycles.
870 * host->clock is in Hz. target_timeout is in us.
871 * Hence, us = 1000000 * cycles / Hz. Round up.
873 val = 1000000ULL * data->timeout_clks;
874 if (do_div(val, host->clock))
875 target_timeout++;
876 target_timeout += val;
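/*
 * Worked example (hypothetical numbers): data->timeout_clks = 1000 cycles
 * at host->clock = 50 MHz gives val = 1000000 * 1000 / 50000000 = 20,
 * i.e. 20 us added to target_timeout.
 */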
880 return target_timeout;
883 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
884 struct mmc_command *cmd)
886 struct mmc_data *data = cmd->data;
887 struct mmc_host *mmc = host->mmc;
888 struct mmc_ios *ios = &mmc->ios;
889 unsigned char bus_width = 1 << ios->bus_width;
890 unsigned int blksz;
891 unsigned int freq;
892 u64 target_timeout;
893 u64 transfer_time;
895 target_timeout = sdhci_target_timeout(host, cmd, data);
896 target_timeout *= NSEC_PER_USEC;
898 if (data) {
899 blksz = data->blksz;
900 freq = host->mmc->actual_clock ? : host->clock;
901 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
902 do_div(transfer_time, freq);
903 /* multiply by '2' to account for any unknowns */
904 transfer_time = transfer_time * 2;
905 /* calculate timeout for the entire data */
906 host->data_timeout = data->blocks * target_timeout +
907 transfer_time;
908 } else {
909 host->data_timeout = target_timeout;
912 if (host->data_timeout)
913 host->data_timeout += MMC_CMD_TRANSFER_TIME;
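/*
 * Worked example (hypothetical numbers) for the data branch above:
 * blksz = 512 on a 4-bit bus (8 / bus_width = 2) at freq = 50 MHz gives
 * transfer_time = 512 * NSEC_PER_SEC * 2 / 50000000 = 20480 ns, doubled
 * to ~41 us; host->data_timeout then becomes
 * data->blocks * target_timeout plus that transfer_time.
 */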
916 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
917 bool *too_big)
919 u8 count;
920 struct mmc_data *data;
921 unsigned target_timeout, current_timeout;
923 *too_big = true;
926 * If the host controller provides us with an incorrect timeout
927 * value, just skip the check and use 0xE. The hardware may take
928 * longer to time out, but that's much better than having a too-short
929 * timeout value.
931 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
932 return 0xE;
934 /* Unspecified command, assume max */
935 if (cmd == NULL)
936 return 0xE;
938 data = cmd->data;
939 /* Unspecified timeout, assume max */
940 if (!data && !cmd->busy_timeout)
941 return 0xE;
943 /* timeout in us */
944 target_timeout = sdhci_target_timeout(host, cmd, data);
947 * Figure out needed cycles.
948 * We do this in steps in order to fit inside a 32 bit int.
949 * The first step is the minimum timeout, which will have a
950 * minimum resolution of 6 bits:
951 * (1) 2^13*1000 > 2^22,
952 * (2) host->timeout_clk < 2^16
953 * =>
954 * (1) / (2) > 2^6
956 count = 0;
957 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
958 while (current_timeout < target_timeout) {
959 count++;
960 current_timeout <<= 1;
961 if (count >= 0xF)
962 break;
965 if (count >= 0xF) {
966 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
967 DBG("Too large timeout 0x%x requested for CMD%d!\n",
968 count, cmd->opcode);
969 count = 0xE;
970 } else {
971 *too_big = false;
974 return count;
977 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
979 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
980 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
982 if (host->flags & SDHCI_REQ_USE_DMA)
983 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
984 else
985 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
987 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
988 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
989 else
990 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
992 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
993 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
996 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
998 if (enable)
999 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1000 else
1001 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1002 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1003 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1005 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1007 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1009 bool too_big = false;
1010 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1012 if (too_big &&
1013 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1014 sdhci_calc_sw_timeout(host, cmd);
1015 sdhci_set_data_timeout_irq(host, false);
1016 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1017 sdhci_set_data_timeout_irq(host, true);
1020 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1022 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1024 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1026 if (host->ops->set_timeout)
1027 host->ops->set_timeout(host, cmd);
1028 else
1029 __sdhci_set_timeout(host, cmd);
1032 static void sdhci_initialize_data(struct sdhci_host *host,
1033 struct mmc_data *data)
1035 WARN_ON(host->data);
1037 /* Sanity checks */
1038 BUG_ON(data->blksz * data->blocks > 524288);
1039 BUG_ON(data->blksz > host->mmc->max_blk_size);
1040 BUG_ON(data->blocks > 65535);
1042 host->data = data;
1043 host->data_early = 0;
1044 host->data->bytes_xfered = 0;
1047 static inline void sdhci_set_block_info(struct sdhci_host *host,
1048 struct mmc_data *data)
1050 /* Set the DMA boundary value and block size */
1051 sdhci_writew(host,
1052 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1053 SDHCI_BLOCK_SIZE);
1055 * For Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block Count
1056 * can be supported; in that case the 16-bit block count register must be 0.
1058 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1059 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1060 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1061 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1062 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1063 } else {
1064 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1068 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1070 struct mmc_data *data = cmd->data;
1072 sdhci_initialize_data(host, data);
1074 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1075 struct scatterlist *sg;
1076 unsigned int length_mask, offset_mask;
1077 int i;
1079 host->flags |= SDHCI_REQ_USE_DMA;
1082 * FIXME: This doesn't account for merging when mapping the
1083 * scatterlist.
1085 * The assumption here being that alignment and lengths are
1086 * the same after DMA mapping to device address space.
1088 length_mask = 0;
1089 offset_mask = 0;
1090 if (host->flags & SDHCI_USE_ADMA) {
1091 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1092 length_mask = 3;
1094 * As we use up to 3 byte chunks to work
1095 * around alignment problems, we need to
1096 * check the offset as well.
1098 offset_mask = 3;
1100 } else {
1101 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1102 length_mask = 3;
1103 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1104 offset_mask = 3;
1107 if (unlikely(length_mask | offset_mask)) {
1108 for_each_sg(data->sg, sg, data->sg_len, i) {
1109 if (sg->length & length_mask) {
1110 DBG("Reverting to PIO because of transfer size (%d)\n",
1111 sg->length);
1112 host->flags &= ~SDHCI_REQ_USE_DMA;
1113 break;
1115 if (sg->offset & offset_mask) {
1116 DBG("Reverting to PIO because of bad alignment\n");
1117 host->flags &= ~SDHCI_REQ_USE_DMA;
1118 break;
1124 if (host->flags & SDHCI_REQ_USE_DMA) {
1125 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1127 if (sg_cnt <= 0) {
1129 * This only happens when someone fed
1130 * us an invalid request.
1132 WARN_ON(1);
1133 host->flags &= ~SDHCI_REQ_USE_DMA;
1134 } else if (host->flags & SDHCI_USE_ADMA) {
1135 sdhci_adma_table_pre(host, data, sg_cnt);
1136 sdhci_set_adma_addr(host, host->adma_addr);
1137 } else {
1138 WARN_ON(sg_cnt != 1);
1139 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1143 sdhci_config_dma(host);
1145 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1146 int flags;
1148 flags = SG_MITER_ATOMIC;
1149 if (host->data->flags & MMC_DATA_READ)
1150 flags |= SG_MITER_TO_SG;
1151 else
1152 flags |= SG_MITER_FROM_SG;
1153 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1154 host->blocks = data->blocks;
1157 sdhci_set_transfer_irqs(host);
1159 sdhci_set_block_info(host, data);
1162 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1164 static int sdhci_external_dma_init(struct sdhci_host *host)
1166 int ret = 0;
1167 struct mmc_host *mmc = host->mmc;
1169 host->tx_chan = dma_request_chan(mmc->parent, "tx");
1170 if (IS_ERR(host->tx_chan)) {
1171 ret = PTR_ERR(host->tx_chan);
1172 if (ret != -EPROBE_DEFER)
1173 pr_warn("Failed to request TX DMA channel.\n");
1174 host->tx_chan = NULL;
1175 return ret;
1178 host->rx_chan = dma_request_chan(mmc->parent, "rx");
1179 if (IS_ERR(host->rx_chan)) {
1180 if (host->tx_chan) {
1181 dma_release_channel(host->tx_chan);
1182 host->tx_chan = NULL;
1185 ret = PTR_ERR(host->rx_chan);
1186 if (ret != -EPROBE_DEFER)
1187 pr_warn("Failed to request RX DMA channel.\n");
1188 host->rx_chan = NULL;
1191 return ret;
1194 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1195 struct mmc_data *data)
1197 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1200 static int sdhci_external_dma_setup(struct sdhci_host *host,
1201 struct mmc_command *cmd)
1203 int ret, i;
1204 enum dma_transfer_direction dir;
1205 struct dma_async_tx_descriptor *desc;
1206 struct mmc_data *data = cmd->data;
1207 struct dma_chan *chan;
1208 struct dma_slave_config cfg;
1209 dma_cookie_t cookie;
1210 int sg_cnt;
1212 if (!host->mapbase)
1213 return -EINVAL;
1215 cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1216 cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1217 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1218 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1219 cfg.src_maxburst = data->blksz / 4;
1220 cfg.dst_maxburst = data->blksz / 4;
1222 /* Sanity check: all the SG entries must be aligned by block size. */
1223 for (i = 0; i < data->sg_len; i++) {
1224 if ((data->sg + i)->length % data->blksz)
1225 return -EINVAL;
1228 chan = sdhci_external_dma_channel(host, data);
1230 ret = dmaengine_slave_config(chan, &cfg);
1231 if (ret)
1232 return ret;
1234 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1235 if (sg_cnt <= 0)
1236 return -EINVAL;
1238 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1239 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1240 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1241 if (!desc)
1242 return -EINVAL;
1244 desc->callback = NULL;
1245 desc->callback_param = NULL;
1247 cookie = dmaengine_submit(desc);
1248 if (dma_submit_error(cookie))
1249 ret = cookie;
1251 return ret;
1254 static void sdhci_external_dma_release(struct sdhci_host *host)
1256 if (host->tx_chan) {
1257 dma_release_channel(host->tx_chan);
1258 host->tx_chan = NULL;
1261 if (host->rx_chan) {
1262 dma_release_channel(host->rx_chan);
1263 host->rx_chan = NULL;
1266 sdhci_switch_external_dma(host, false);
1269 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1270 struct mmc_command *cmd)
1272 struct mmc_data *data = cmd->data;
1274 sdhci_initialize_data(host, data);
1276 host->flags |= SDHCI_REQ_USE_DMA;
1277 sdhci_set_transfer_irqs(host);
1279 sdhci_set_block_info(host, data);
1282 static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1283 struct mmc_command *cmd)
1285 if (!sdhci_external_dma_setup(host, cmd)) {
1286 __sdhci_external_dma_prepare_data(host, cmd);
1287 } else {
1288 sdhci_external_dma_release(host);
1289 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1290 mmc_hostname(host->mmc));
1291 sdhci_prepare_data(host, cmd);
1295 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1296 struct mmc_command *cmd)
1298 struct dma_chan *chan;
1300 if (!cmd->data)
1301 return;
1303 chan = sdhci_external_dma_channel(host, cmd->data);
1304 if (chan)
1305 dma_async_issue_pending(chan);
1308 #else
1310 static inline int sdhci_external_dma_init(struct sdhci_host *host)
1312 return -EOPNOTSUPP;
1315 static inline void sdhci_external_dma_release(struct sdhci_host *host)
1319 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1320 struct mmc_command *cmd)
1322 /* This should never happen */
1323 WARN_ON_ONCE(1);
1326 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1327 struct mmc_command *cmd)
1331 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1332 struct mmc_data *data)
1334 return NULL;
1337 #endif
1339 void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1341 host->use_external_dma = en;
1343 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1345 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1346 struct mmc_request *mrq)
1348 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1349 !mrq->cap_cmd_during_tfr;
1352 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1353 struct mmc_command *cmd,
1354 u16 *mode)
1356 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1357 (cmd->opcode != SD_IO_RW_EXTENDED);
1358 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1359 u16 ctrl2;
1362 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1363 * Select' is recommended rather than use of 'Auto CMD12
1364 * Enable' or 'Auto CMD23 Enable'.
1366 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1367 *mode |= SDHCI_TRNS_AUTO_SEL;
1369 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1370 if (use_cmd23)
1371 ctrl2 |= SDHCI_CMD23_ENABLE;
1372 else
1373 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1374 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1376 return;
1380 * If we are sending CMD23, CMD12 never gets sent
1381 * on successful completion (so no Auto-CMD12).
1383 if (use_cmd12)
1384 *mode |= SDHCI_TRNS_AUTO_CMD12;
1385 else if (use_cmd23)
1386 *mode |= SDHCI_TRNS_AUTO_CMD23;
1389 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1390 struct mmc_command *cmd)
1392 u16 mode = 0;
1393 struct mmc_data *data = cmd->data;
1395 if (data == NULL) {
1396 if (host->quirks2 &
1397 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1398 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1399 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1400 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1401 } else {
1402 /* clear Auto CMD settings for no data CMDs */
1403 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1404 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1405 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1407 return;
1410 WARN_ON(!host->data);
1412 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1413 mode = SDHCI_TRNS_BLK_CNT_EN;
1415 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1416 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1417 sdhci_auto_cmd_select(host, cmd, &mode);
1418 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1419 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1422 if (data->flags & MMC_DATA_READ)
1423 mode |= SDHCI_TRNS_READ;
1424 if (host->flags & SDHCI_REQ_USE_DMA)
1425 mode |= SDHCI_TRNS_DMA;
1427 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1430 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1432 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1433 ((mrq->cmd && mrq->cmd->error) ||
1434 (mrq->sbc && mrq->sbc->error) ||
1435 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1436 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1439 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1441 int i;
1443 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1444 if (host->mrqs_done[i] == mrq) {
1445 WARN_ON(1);
1446 return;
1450 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1451 if (!host->mrqs_done[i]) {
1452 host->mrqs_done[i] = mrq;
1453 break;
1457 WARN_ON(i >= SDHCI_MAX_MRQS);
1460 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1462 if (host->cmd && host->cmd->mrq == mrq)
1463 host->cmd = NULL;
1465 if (host->data_cmd && host->data_cmd->mrq == mrq)
1466 host->data_cmd = NULL;
1468 if (host->data && host->data->mrq == mrq)
1469 host->data = NULL;
1471 if (sdhci_needs_reset(host, mrq))
1472 host->pending_reset = true;
1474 sdhci_set_mrq_done(host, mrq);
1476 sdhci_del_timer(host, mrq);
1478 if (!sdhci_has_requests(host))
1479 sdhci_led_deactivate(host);
1482 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1484 __sdhci_finish_mrq(host, mrq);
1486 queue_work(host->complete_wq, &host->complete_work);
1489 static void sdhci_finish_data(struct sdhci_host *host)
1491 struct mmc_command *data_cmd = host->data_cmd;
1492 struct mmc_data *data = host->data;
1494 host->data = NULL;
1495 host->data_cmd = NULL;
1498 * The controller needs a reset of internal state machines upon error
1499 * conditions.
1501 if (data->error) {
1502 if (!host->cmd || host->cmd == data_cmd)
1503 sdhci_do_reset(host, SDHCI_RESET_CMD);
1504 sdhci_do_reset(host, SDHCI_RESET_DATA);
1507 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1508 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1509 sdhci_adma_table_post(host, data);
1512 * The specification states that the block count register must
1513 * be updated, but it does not specify at what point in the
1514 * data flow. That makes the register entirely useless to read
1515 * back so we have to assume that nothing made it to the card
1516 * in the event of an error.
1518 if (data->error)
1519 data->bytes_xfered = 0;
1520 else
1521 data->bytes_xfered = data->blksz * data->blocks;
1524 * Need to send CMD12 if -
1525 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1526 * b) error in multiblock transfer
1528 if (data->stop &&
1529 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1530 data->error)) {
1532 * 'cap_cmd_during_tfr' request must not use the command line
1533 * after mmc_command_done() has been called. It is the upper layer's
1534 * responsibility to send the stop command if required.
1536 if (data->mrq->cap_cmd_during_tfr) {
1537 __sdhci_finish_mrq(host, data->mrq);
1538 } else {
1539 /* Avoid triggering warning in sdhci_send_command() */
1540 host->cmd = NULL;
1541 sdhci_send_command(host, data->stop);
1543 } else {
1544 __sdhci_finish_mrq(host, data->mrq);
1548 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1550 int flags;
1551 u32 mask;
1552 unsigned long timeout;
1554 WARN_ON(host->cmd);
1556 /* Initially, a command has no error */
1557 cmd->error = 0;
1559 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1560 cmd->opcode == MMC_STOP_TRANSMISSION)
1561 cmd->flags |= MMC_RSP_BUSY;
1563 /* Wait max 10 ms */
1564 timeout = 10;
1566 mask = SDHCI_CMD_INHIBIT;
1567 if (sdhci_data_line_cmd(cmd))
1568 mask |= SDHCI_DATA_INHIBIT;
1570 /* We shouldn't wait for data inhibit for stop commands, even
1571 though they might use busy signaling */
1572 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1573 mask &= ~SDHCI_DATA_INHIBIT;
1575 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1576 if (timeout == 0) {
1577 pr_err("%s: Controller never released inhibit bit(s).\n",
1578 mmc_hostname(host->mmc));
1579 sdhci_dumpregs(host);
1580 cmd->error = -EIO;
1581 sdhci_finish_mrq(host, cmd->mrq);
1582 return;
1584 timeout--;
1585 mdelay(1);
1588 host->cmd = cmd;
1589 host->data_timeout = 0;
1590 if (sdhci_data_line_cmd(cmd)) {
1591 WARN_ON(host->data_cmd);
1592 host->data_cmd = cmd;
1593 sdhci_set_timeout(host, cmd);
1596 if (cmd->data) {
1597 if (host->use_external_dma)
1598 sdhci_external_dma_prepare_data(host, cmd);
1599 else
1600 sdhci_prepare_data(host, cmd);
1603 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1605 sdhci_set_transfer_mode(host, cmd);
1607 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1608 pr_err("%s: Unsupported response type!\n",
1609 mmc_hostname(host->mmc));
1610 cmd->error = -EINVAL;
1611 sdhci_finish_mrq(host, cmd->mrq);
1612 return;
1615 if (!(cmd->flags & MMC_RSP_PRESENT))
1616 flags = SDHCI_CMD_RESP_NONE;
1617 else if (cmd->flags & MMC_RSP_136)
1618 flags = SDHCI_CMD_RESP_LONG;
1619 else if (cmd->flags & MMC_RSP_BUSY)
1620 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1621 else
1622 flags = SDHCI_CMD_RESP_SHORT;
1624 if (cmd->flags & MMC_RSP_CRC)
1625 flags |= SDHCI_CMD_CRC;
1626 if (cmd->flags & MMC_RSP_OPCODE)
1627 flags |= SDHCI_CMD_INDEX;
1629 /* CMD19 is special in that the Data Present Select should be set */
1630 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1631 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1632 flags |= SDHCI_CMD_DATA;
1634 timeout = jiffies;
1635 if (host->data_timeout)
1636 timeout += nsecs_to_jiffies(host->data_timeout);
1637 else if (!cmd->data && cmd->busy_timeout > 9000)
1638 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1639 else
1640 timeout += 10 * HZ;
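/*
 * Illustrative example (hypothetical value): a busy-only command with
 * cmd->busy_timeout = 20000 ms takes the middle branch above, so the
 * software timer is armed for roughly 20 s plus one extra second of slack.
 */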
1641 sdhci_mod_timer(host, cmd->mrq, timeout);
1643 if (host->use_external_dma)
1644 sdhci_external_dma_pre_transfer(host, cmd);
1646 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1648 EXPORT_SYMBOL_GPL(sdhci_send_command);
1650 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1652 int i, reg;
1654 for (i = 0; i < 4; i++) {
1655 reg = SDHCI_RESPONSE + (3 - i) * 4;
1656 cmd->resp[i] = sdhci_readl(host, reg);
1659 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1660 return;
1662 /* CRC is stripped so we need to do some shifting */
1663 for (i = 0; i < 4; i++) {
1664 cmd->resp[i] <<= 8;
1665 if (i != 3)
1666 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1670 static void sdhci_finish_command(struct sdhci_host *host)
1672 struct mmc_command *cmd = host->cmd;
1674 host->cmd = NULL;
1676 if (cmd->flags & MMC_RSP_PRESENT) {
1677 if (cmd->flags & MMC_RSP_136) {
1678 sdhci_read_rsp_136(host, cmd);
1679 } else {
1680 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1684 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1685 mmc_command_done(host->mmc, cmd->mrq);
1688 * The host can send an interrupt when the busy state has
1689 * ended, allowing us to wait without wasting CPU cycles.
1690 * The busy signal uses DAT0 so this is similar to waiting
1691 * for data to complete.
1693 * Note: The 1.0 specification is a bit ambiguous about this
1694 * feature so there might be some problems with older
1695 * controllers.
1697 if (cmd->flags & MMC_RSP_BUSY) {
1698 if (cmd->data) {
1699 DBG("Cannot wait for busy signal when also doing a data transfer");
1700 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1701 cmd == host->data_cmd) {
1702 /* Command complete before busy is ended */
1703 return;
1707 /* Finished CMD23, now send actual command. */
1708 if (cmd == cmd->mrq->sbc) {
1709 sdhci_send_command(host, cmd->mrq->cmd);
1710 } else {
1712 /* Processed actual command. */
1713 if (host->data && host->data_early)
1714 sdhci_finish_data(host);
1716 if (!cmd->data)
1717 __sdhci_finish_mrq(host, cmd->mrq);
1721 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1723 u16 preset = 0;
1725 switch (host->timing) {
1726 case MMC_TIMING_UHS_SDR12:
1727 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1728 break;
1729 case MMC_TIMING_UHS_SDR25:
1730 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1731 break;
1732 case MMC_TIMING_UHS_SDR50:
1733 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1734 break;
1735 case MMC_TIMING_UHS_SDR104:
1736 case MMC_TIMING_MMC_HS200:
1737 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1738 break;
1739 case MMC_TIMING_UHS_DDR50:
1740 case MMC_TIMING_MMC_DDR52:
1741 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1742 break;
1743 case MMC_TIMING_MMC_HS400:
1744 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1745 break;
1746 default:
1747 pr_warn("%s: Invalid UHS-I mode selected\n",
1748 mmc_hostname(host->mmc));
1749 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1750 break;
1752 return preset;
1755 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1756 unsigned int *actual_clock)
1758 int div = 0; /* Initialized for compiler warning */
1759 int real_div = div, clk_mul = 1;
1760 u16 clk = 0;
1761 bool switch_base_clk = false;
1763 if (host->version >= SDHCI_SPEC_300) {
1764 if (host->preset_enabled) {
1765 u16 pre_val;
1767 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1768 pre_val = sdhci_get_preset_value(host);
1769 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1770 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1771 if (host->clk_mul &&
1772 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1773 clk = SDHCI_PROG_CLOCK_MODE;
1774 real_div = div + 1;
1775 clk_mul = host->clk_mul;
1776 } else {
1777 real_div = max_t(int, 1, div << 1);
1779 goto clock_set;
1783 * Check if the Host Controller supports Programmable Clock
1784 * Mode.
1786 if (host->clk_mul) {
1787 for (div = 1; div <= 1024; div++) {
1788 if ((host->max_clk * host->clk_mul / div)
1789 <= clock)
1790 break;
1792 if ((host->max_clk * host->clk_mul / div) <= clock) {
1794 * Set Programmable Clock Mode in the Clock
1795 * Control register.
1797 clk = SDHCI_PROG_CLOCK_MODE;
1798 real_div = div;
1799 clk_mul = host->clk_mul;
1800 div--;
1801 } else {
1803 * Divisor can be too small to reach clock
1804 * speed requirement. Then use the base clock.
1806 switch_base_clk = true;
1810 if (!host->clk_mul || switch_base_clk) {
1811 /* Version 3.00 divisors must be a multiple of 2. */
1812 if (host->max_clk <= clock)
1813 div = 1;
1814 else {
1815 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1816 div += 2) {
1817 if ((host->max_clk / div) <= clock)
1818 break;
1821 real_div = div;
1822 div >>= 1;
1823 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1824 && !div && host->max_clk <= 25000000)
1825 div = 1;
1827 } else {
1828 /* Version 2.00 divisors must be a power of 2. */
1829 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1830 if ((host->max_clk / div) <= clock)
1831 break;
1833 real_div = div;
1834 div >>= 1;
1837 clock_set:
1838 if (real_div)
1839 *actual_clock = (host->max_clk * clk_mul) / real_div;
1840 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1841 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1842 << SDHCI_DIVIDER_HI_SHIFT;
1844 return clk;
1846 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
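/*
 * Illustrative example (hypothetical numbers): on an SDHCI 3.0 host with
 * host->max_clk = 200 MHz, no clock multiplier and a requested clock of
 * 50 MHz, sdhci_calc_clk() finds div = 4 (real_div = 4), reports
 * *actual_clock = 50 MHz and encodes div >> 1 = 2 into the divider field
 * of the returned clock register value.
 */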
1848 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1850 ktime_t timeout;
1852 clk |= SDHCI_CLOCK_INT_EN;
1853 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1855 /* Wait max 150 ms */
1856 timeout = ktime_add_ms(ktime_get(), 150);
1857 while (1) {
1858 bool timedout = ktime_after(ktime_get(), timeout);
1860 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1861 if (clk & SDHCI_CLOCK_INT_STABLE)
1862 break;
1863 if (timedout) {
1864 pr_err("%s: Internal clock never stabilised.\n",
1865 mmc_hostname(host->mmc));
1866 sdhci_dumpregs(host);
1867 return;
1869 udelay(10);
1872 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1873 clk |= SDHCI_CLOCK_PLL_EN;
1874 clk &= ~SDHCI_CLOCK_INT_STABLE;
1875 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1877 /* Wait max 150 ms */
1878 timeout = ktime_add_ms(ktime_get(), 150);
1879 while (1) {
1880 bool timedout = ktime_after(ktime_get(), timeout);
1882 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1883 if (clk & SDHCI_CLOCK_INT_STABLE)
1884 break;
1885 if (timedout) {
1886 pr_err("%s: PLL clock never stabilised.\n",
1887 mmc_hostname(host->mmc));
1888 sdhci_dumpregs(host);
1889 return;
1891 udelay(10);
1895 clk |= SDHCI_CLOCK_CARD_EN;
1896 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1898 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1900 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1902 u16 clk;
1904 host->mmc->actual_clock = 0;
1906 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1908 if (clock == 0)
1909 return;
1911 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1912 sdhci_enable_clk(host, clk);
1914 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1916 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1917 unsigned short vdd)
1919 struct mmc_host *mmc = host->mmc;
1921 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1923 if (mode != MMC_POWER_OFF)
1924 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1925 else
1926 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1929 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1930 unsigned short vdd)
1932 u8 pwr = 0;
1934 if (mode != MMC_POWER_OFF) {
1935 switch (1 << vdd) {
1936 case MMC_VDD_165_195:
1938 * Without a regulator, SDHCI does not support 2.0v
1939 * so we only get here if the driver deliberately
1940 * added the 2.0v range to ocr_avail. Map it to 1.8v
1941 * for the purpose of turning on the power.
1943 case MMC_VDD_20_21:
1944 pwr = SDHCI_POWER_180;
1945 break;
1946 case MMC_VDD_29_30:
1947 case MMC_VDD_30_31:
1948 pwr = SDHCI_POWER_300;
1949 break;
1950 case MMC_VDD_32_33:
1951 case MMC_VDD_33_34:
1952 pwr = SDHCI_POWER_330;
1953 break;
1954 default:
1955 WARN(1, "%s: Invalid vdd %#x\n",
1956 mmc_hostname(host->mmc), vdd);
1957 break;
1961 if (host->pwr == pwr)
1962 return;
1964 host->pwr = pwr;
1966 if (pwr == 0) {
1967 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1968 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1969 sdhci_runtime_pm_bus_off(host);
1970 } else {
1972 * Spec says that we should clear the power reg before setting
1973 * a new value. Some controllers don't seem to like this though.
1975 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1976 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1979 * At least the Marvell CaFe chip gets confused if we set the
1980 * voltage and turn on the power at the same time, so set the
1981 * voltage first.
1983 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1984 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1986 pwr |= SDHCI_POWER_ON;
1988 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1990 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1991 sdhci_runtime_pm_bus_on(host);
1994 * Some controllers need an extra 10 ms delay before
1995 * they can apply clock after applying power
1997 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1998 mdelay(10);
2001 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2003 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2004 unsigned short vdd)
2006 if (IS_ERR(host->mmc->supply.vmmc))
2007 sdhci_set_power_noreg(host, mode, vdd);
2008 else
2009 sdhci_set_power_reg(host, mode, vdd);
2011 EXPORT_SYMBOL_GPL(sdhci_set_power);
2013 /*****************************************************************************\
2015 * MMC callbacks *
2017 \*****************************************************************************/
2019 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2021 struct sdhci_host *host;
2022 int present;
2023 unsigned long flags;
2025 host = mmc_priv(mmc);
2027 /* Firstly check card presence */
2028 present = mmc->ops->get_cd(mmc);
2030 spin_lock_irqsave(&host->lock, flags);
2032 sdhci_led_activate(host);
2034 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
2035 mrq->cmd->error = -ENOMEDIUM;
2036 sdhci_finish_mrq(host, mrq);
2037 } else {
2038 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
2039 sdhci_send_command(host, mrq->sbc);
2040 else
2041 sdhci_send_command(host, mrq->cmd);
2044 spin_unlock_irqrestore(&host->lock, flags);
2046 EXPORT_SYMBOL_GPL(sdhci_request);
2048 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2050 u8 ctrl;
2052 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2053 if (width == MMC_BUS_WIDTH_8) {
2054 ctrl &= ~SDHCI_CTRL_4BITBUS;
2055 ctrl |= SDHCI_CTRL_8BITBUS;
2056 } else {
2057 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2058 ctrl &= ~SDHCI_CTRL_8BITBUS;
2059 if (width == MMC_BUS_WIDTH_4)
2060 ctrl |= SDHCI_CTRL_4BITBUS;
2061 else
2062 ctrl &= ~SDHCI_CTRL_4BITBUS;
2064 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2066 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2068 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2070 u16 ctrl_2;
2072 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2073 /* Select Bus Speed Mode for host */
2074 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2075 if ((timing == MMC_TIMING_MMC_HS200) ||
2076 (timing == MMC_TIMING_UHS_SDR104))
2077 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2078 else if (timing == MMC_TIMING_UHS_SDR12)
2079 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2080 else if (timing == MMC_TIMING_UHS_SDR25)
2081 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2082 else if (timing == MMC_TIMING_UHS_SDR50)
2083 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2084 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2085 (timing == MMC_TIMING_MMC_DDR52))
2086 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2087 else if (timing == MMC_TIMING_MMC_HS400)
2088 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2089 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2091 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2093 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2095 struct sdhci_host *host = mmc_priv(mmc);
2096 u8 ctrl;
2098 if (ios->power_mode == MMC_POWER_UNDEFINED)
2099 return;
2101 if (host->flags & SDHCI_DEVICE_DEAD) {
2102 if (!IS_ERR(mmc->supply.vmmc) &&
2103 ios->power_mode == MMC_POWER_OFF)
2104 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2105 return;
2109 * Reset the chip on each power off.
2110 * Should clear out any weird states.
2112 if (ios->power_mode == MMC_POWER_OFF) {
2113 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2114 sdhci_reinit(host);
2117 if (host->version >= SDHCI_SPEC_300 &&
2118 (ios->power_mode == MMC_POWER_UP) &&
2119 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2120 sdhci_enable_preset_value(host, false);
2122 if (!ios->clock || ios->clock != host->clock) {
2123 host->ops->set_clock(host, ios->clock);
2124 host->clock = ios->clock;
2126 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2127 host->clock) {
2128 host->timeout_clk = host->mmc->actual_clock ?
2129 host->mmc->actual_clock / 1000 :
2130 host->clock / 1000;
2131 host->mmc->max_busy_timeout =
2132 host->ops->get_max_timeout_count ?
2133 host->ops->get_max_timeout_count(host) :
2134 1 << 27;
2135 host->mmc->max_busy_timeout /= host->timeout_clk;
2139 if (host->ops->set_power)
2140 host->ops->set_power(host, ios->power_mode, ios->vdd);
2141 else
2142 sdhci_set_power(host, ios->power_mode, ios->vdd);
2144 if (host->ops->platform_send_init_74_clocks)
2145 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2147 host->ops->set_bus_width(host, ios->bus_width);
2149 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2151 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2152 if (ios->timing == MMC_TIMING_SD_HS ||
2153 ios->timing == MMC_TIMING_MMC_HS ||
2154 ios->timing == MMC_TIMING_MMC_HS400 ||
2155 ios->timing == MMC_TIMING_MMC_HS200 ||
2156 ios->timing == MMC_TIMING_MMC_DDR52 ||
2157 ios->timing == MMC_TIMING_UHS_SDR50 ||
2158 ios->timing == MMC_TIMING_UHS_SDR104 ||
2159 ios->timing == MMC_TIMING_UHS_DDR50 ||
2160 ios->timing == MMC_TIMING_UHS_SDR25)
2161 ctrl |= SDHCI_CTRL_HISPD;
2162 else
2163 ctrl &= ~SDHCI_CTRL_HISPD;
2166 if (host->version >= SDHCI_SPEC_300) {
2167 u16 clk, ctrl_2;
2169 if (!host->preset_enabled) {
2170 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2172 * We only need to set Driver Strength if the
2173 * preset value enable is not set.
2175 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2176 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2177 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2178 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2179 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2180 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2181 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2182 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2183 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2184 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2185 else {
2186 pr_warn("%s: invalid driver type, default to driver type B\n",
2187 mmc_hostname(mmc));
2188 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2191 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2192 } else {
2194 * According to SDHC Spec v3.00, if the Preset Value
2195 * Enable in the Host Control 2 register is set, we
2196 * need to reset SD Clock Enable before changing High
2197 * Speed Enable to avoid generating clock glitches.
2200 /* Reset SD Clock Enable */
2201 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2202 clk &= ~SDHCI_CLOCK_CARD_EN;
2203 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2205 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2207 /* Re-enable SD Clock */
2208 host->ops->set_clock(host, host->clock);
2211 /* Reset SD Clock Enable */
2212 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2213 clk &= ~SDHCI_CLOCK_CARD_EN;
2214 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2216 host->ops->set_uhs_signaling(host, ios->timing);
2217 host->timing = ios->timing;
2219 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2220 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2221 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2222 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2223 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2224 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2225 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2226 u16 preset;
2228 sdhci_enable_preset_value(host, true);
2229 preset = sdhci_get_preset_value(host);
2230 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2231 >> SDHCI_PRESET_DRV_SHIFT;
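/*
 * Note (illustrative): the 2-bit driver strength field extracted above uses
 * the same encoding as MMC_SET_DRIVER_TYPE_* (0 = type B, 1 = A, 2 = C,
 * 3 = D), so e.g. a preset register value of 0x4000 selects driver type A.
 */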
2234 /* Re-enable SD Clock */
2235 host->ops->set_clock(host, host->clock);
2236 } else
2237 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2240 * Some (ENE) controllers misbehave badly on some ios operations,
2241 * signalling timeout and CRC errors even on CMD0. Resetting
2242 * it on each ios seems to solve the problem.
2244 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2245 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2247 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2249 static int sdhci_get_cd(struct mmc_host *mmc)
2251 struct sdhci_host *host = mmc_priv(mmc);
2252 int gpio_cd = mmc_gpio_get_cd(mmc);
2254 if (host->flags & SDHCI_DEVICE_DEAD)
2255 return 0;
2257 /* If nonremovable, assume that the card is always present. */
2258 if (!mmc_card_is_removable(host->mmc))
2259 return 1;
2262 * Try slot gpio detect; if defined, it takes precedence
2263 * over built-in controller functionality.
2265 if (gpio_cd >= 0)
2266 return !!gpio_cd;
2268 /* If polling, assume that the card is always present. */
2269 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2270 return 1;
2272 /* Host native card detect */
2273 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2276 static int sdhci_check_ro(struct sdhci_host *host)
2278 unsigned long flags;
2279 int is_readonly;
2281 spin_lock_irqsave(&host->lock, flags);
2283 if (host->flags & SDHCI_DEVICE_DEAD)
2284 is_readonly = 0;
2285 else if (host->ops->get_ro)
2286 is_readonly = host->ops->get_ro(host);
2287 else if (mmc_can_gpio_ro(host->mmc))
2288 is_readonly = mmc_gpio_get_ro(host->mmc);
2289 else
2290 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2291 & SDHCI_WRITE_PROTECT);
2293 spin_unlock_irqrestore(&host->lock, flags);
2295 /* This quirk needs to be replaced by a callback-function later */
2296 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2297 !is_readonly : is_readonly;
2300 #define SAMPLE_COUNT 5
2302 static int sdhci_get_ro(struct mmc_host *mmc)
2304 struct sdhci_host *host = mmc_priv(mmc);
2305 int i, ro_count;
2307 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2308 return sdhci_check_ro(host);
2310 ro_count = 0;
2311 for (i = 0; i < SAMPLE_COUNT; i++) {
2312 if (sdhci_check_ro(host)) {
2313 if (++ro_count > SAMPLE_COUNT / 2)
2314 return 1;
2316 msleep(30);
2318 return 0;
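/*
 * With SAMPLE_COUNT set to 5 this is a simple majority vote: the card is
 * reported read-only as soon as 3 samples, taken roughly 30 ms apart, read
 * back as write-protected; otherwise it is reported writable.
 */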
2321 static void sdhci_hw_reset(struct mmc_host *mmc)
2323 struct sdhci_host *host = mmc_priv(mmc);
2325 if (host->ops && host->ops->hw_reset)
2326 host->ops->hw_reset(host);
2329 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2331 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2332 if (enable)
2333 host->ier |= SDHCI_INT_CARD_INT;
2334 else
2335 host->ier &= ~SDHCI_INT_CARD_INT;
2337 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2338 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2342 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2344 struct sdhci_host *host = mmc_priv(mmc);
2345 unsigned long flags;
2347 if (enable)
2348 pm_runtime_get_noresume(host->mmc->parent);
2350 spin_lock_irqsave(&host->lock, flags);
2351 sdhci_enable_sdio_irq_nolock(host, enable);
2352 spin_unlock_irqrestore(&host->lock, flags);
2354 if (!enable)
2355 pm_runtime_put_noidle(host->mmc->parent);
2357 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2359 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2361 struct sdhci_host *host = mmc_priv(mmc);
2362 unsigned long flags;
2364 spin_lock_irqsave(&host->lock, flags);
2365 sdhci_enable_sdio_irq_nolock(host, true);
2366 spin_unlock_irqrestore(&host->lock, flags);
2369 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2370 struct mmc_ios *ios)
2372 struct sdhci_host *host = mmc_priv(mmc);
2373 u16 ctrl;
2374 int ret;
2377 * Signal Voltage Switching is only applicable for Host Controllers
2378 * v3.00 and above.
2380 if (host->version < SDHCI_SPEC_300)
2381 return 0;
2383 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2385 switch (ios->signal_voltage) {
2386 case MMC_SIGNAL_VOLTAGE_330:
2387 if (!(host->flags & SDHCI_SIGNALING_330))
2388 return -EINVAL;
2389 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2390 ctrl &= ~SDHCI_CTRL_VDD_180;
2391 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2393 if (!IS_ERR(mmc->supply.vqmmc)) {
2394 ret = mmc_regulator_set_vqmmc(mmc, ios);
2395 if (ret) {
2396 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2397 mmc_hostname(mmc));
2398 return -EIO;
2401 /* Wait for 5ms */
2402 usleep_range(5000, 5500);
2404 /* 3.3V regulator output should be stable within 5 ms */
2405 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2406 if (!(ctrl & SDHCI_CTRL_VDD_180))
2407 return 0;
2409 pr_warn("%s: 3.3V regulator output did not become stable\n",
2410 mmc_hostname(mmc));
2412 return -EAGAIN;
2413 case MMC_SIGNAL_VOLTAGE_180:
2414 if (!(host->flags & SDHCI_SIGNALING_180))
2415 return -EINVAL;
2416 if (!IS_ERR(mmc->supply.vqmmc)) {
2417 ret = mmc_regulator_set_vqmmc(mmc, ios);
2418 if (ret) {
2419 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2420 mmc_hostname(mmc));
2421 return -EIO;
2426 * Enable 1.8V Signal Enable in the Host Control2
2427 * register
2429 ctrl |= SDHCI_CTRL_VDD_180;
2430 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2432 /* Some controllers need to do more when switching */
2433 if (host->ops->voltage_switch)
2434 host->ops->voltage_switch(host);
2436 /* 1.8V regulator output should be stable within 5 ms */
2437 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2438 if (ctrl & SDHCI_CTRL_VDD_180)
2439 return 0;
2441 pr_warn("%s: 1.8V regulator output did not become stable\n",
2442 mmc_hostname(mmc));
2444 return -EAGAIN;
2445 case MMC_SIGNAL_VOLTAGE_120:
2446 if (!(host->flags & SDHCI_SIGNALING_120))
2447 return -EINVAL;
2448 if (!IS_ERR(mmc->supply.vqmmc)) {
2449 ret = mmc_regulator_set_vqmmc(mmc, ios);
2450 if (ret) {
2451 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2452 mmc_hostname(mmc));
2453 return -EIO;
2456 return 0;
2457 default:
2458 /* No signal voltage switch required */
2459 return 0;
2462 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2464 static int sdhci_card_busy(struct mmc_host *mmc)
2466 struct sdhci_host *host = mmc_priv(mmc);
2467 u32 present_state;
2469 /* Check whether DAT[0] is 0 */
2470 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2472 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2475 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2477 struct sdhci_host *host = mmc_priv(mmc);
2478 unsigned long flags;
2480 spin_lock_irqsave(&host->lock, flags);
2481 host->flags |= SDHCI_HS400_TUNING;
2482 spin_unlock_irqrestore(&host->lock, flags);
2484 return 0;
2487 void sdhci_start_tuning(struct sdhci_host *host)
2489 u16 ctrl;
2491 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2492 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2493 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2494 ctrl |= SDHCI_CTRL_TUNED_CLK;
2495 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2498 * As per the Host Controller spec v3.00, tuning command
2499 * generates Buffer Read Ready interrupt, so enable that.
2501 * Note: The spec clearly says that when tuning sequence
2502 * is being performed, the controller does not generate
2503 * interrupts other than Buffer Read Ready interrupt. But
2504 * to make sure we don't hit a controller bug, we _only_
2505 * enable Buffer Read Ready interrupt here.
2507 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2508 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2510 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2512 void sdhci_end_tuning(struct sdhci_host *host)
2514 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2515 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2517 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2519 void sdhci_reset_tuning(struct sdhci_host *host)
2521 u16 ctrl;
2523 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2524 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2525 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2526 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2528 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2530 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2532 sdhci_reset_tuning(host);
2534 sdhci_do_reset(host, SDHCI_RESET_CMD);
2535 sdhci_do_reset(host, SDHCI_RESET_DATA);
2537 sdhci_end_tuning(host);
2539 mmc_abort_tuning(host->mmc, opcode);
2541 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2544 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
2545 * SDHCI tuning command does not have a data payload (or rather the hardware
2546 * handles it automatically), so mmc_send_tuning() would return -EIO. Also, the
2547 * tuning command's interrupt setup is different from other commands and there
2548 * is no timeout interrupt, so special handling is needed.
2550 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2552 struct mmc_host *mmc = host->mmc;
2553 struct mmc_command cmd = {};
2554 struct mmc_request mrq = {};
2555 unsigned long flags;
2556 u32 b = host->sdma_boundary;
2558 spin_lock_irqsave(&host->lock, flags);
2560 cmd.opcode = opcode;
2561 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2562 cmd.mrq = &mrq;
2564 mrq.cmd = &cmd;
2566 * In response to CMD19, the card sends a 64-byte tuning
2567 * block to the Host Controller, so we set the block size
2568 * to 64 here.
2570 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2571 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2572 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2573 else
2574 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
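/*
 * Note: for eMMC HS200 tuning (CMD21) on an 8-bit bus the tuning block is
 * 128 bytes, hence the larger block size above; CMD19 and 4-bit CMD21 use a
 * 64-byte tuning block.
 */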
2577 * The tuning block is sent by the card to the host controller.
2578 * So we set the TRNS_READ bit in the Transfer Mode register.
2579 * This also takes care of setting DMA Enable and Multi Block
2580 * Select in the same register to 0.
2582 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2584 sdhci_send_command(host, &cmd);
2586 host->cmd = NULL;
2588 sdhci_del_timer(host, &mrq);
2590 host->tuning_done = 0;
2592 spin_unlock_irqrestore(&host->lock, flags);
2594 /* Wait for Buffer Read Ready interrupt */
2595 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2596 msecs_to_jiffies(50));
2599 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2601 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2603 int i;
2606 * Issue the opcode repeatedly until Execute Tuning is cleared to 0 or the
2607 * number of loops reaches the tuning loop count.
2609 for (i = 0; i < host->tuning_loop_count; i++) {
2610 u16 ctrl;
2612 sdhci_send_tuning(host, opcode);
2614 if (!host->tuning_done) {
2615 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2616 mmc_hostname(host->mmc));
2617 sdhci_abort_tuning(host, opcode);
2618 return -ETIMEDOUT;
2621 /* Spec does not require a delay between tuning cycles */
2622 if (host->tuning_delay > 0)
2623 mdelay(host->tuning_delay);
2625 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2626 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2627 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2628 return 0; /* Success! */
2629 break;
2634 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2635 mmc_hostname(host->mmc));
2636 sdhci_reset_tuning(host);
2637 return -EAGAIN;
2640 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2642 struct sdhci_host *host = mmc_priv(mmc);
2643 int err = 0;
2644 unsigned int tuning_count = 0;
2645 bool hs400_tuning;
2647 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2649 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2650 tuning_count = host->tuning_count;
2653 * The Host Controller needs tuning in case of SDR104 and DDR50
2654 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2655 * the Capabilities register.
2656 * If the Host Controller supports the HS200 mode then the
2657 * tuning function has to be executed.
2659 switch (host->timing) {
2660 /* HS400 tuning is done in HS200 mode */
2661 case MMC_TIMING_MMC_HS400:
2662 err = -EINVAL;
2663 goto out;
2665 case MMC_TIMING_MMC_HS200:
2667 * Periodic re-tuning for HS400 is not expected to be needed, so
2668 * disable it here.
2670 if (hs400_tuning)
2671 tuning_count = 0;
2672 break;
2674 case MMC_TIMING_UHS_SDR104:
2675 case MMC_TIMING_UHS_DDR50:
2676 break;
2678 case MMC_TIMING_UHS_SDR50:
2679 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2680 break;
2681 /* FALLTHROUGH */
2683 default:
2684 goto out;
2687 if (host->ops->platform_execute_tuning) {
2688 err = host->ops->platform_execute_tuning(host, opcode);
2689 goto out;
2692 host->mmc->retune_period = tuning_count;
2694 if (host->tuning_delay < 0)
2695 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2697 sdhci_start_tuning(host);
2699 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2701 sdhci_end_tuning(host);
2702 out:
2703 host->flags &= ~SDHCI_HS400_TUNING;
2705 return err;
2707 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2709 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2711 /* Host Controller v3.00 defines preset value registers */
2712 if (host->version < SDHCI_SPEC_300)
2713 return;
2716 * We only enable or disable the Preset Value if it is not already
2717 * enabled or disabled respectively. Otherwise, we bail out.
2719 if (host->preset_enabled != enable) {
2720 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2722 if (enable)
2723 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2724 else
2725 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2727 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2729 if (enable)
2730 host->flags |= SDHCI_PV_ENABLED;
2731 else
2732 host->flags &= ~SDHCI_PV_ENABLED;
2734 host->preset_enabled = enable;
2738 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2739 int err)
2741 struct sdhci_host *host = mmc_priv(mmc);
2742 struct mmc_data *data = mrq->data;
2744 if (data->host_cookie != COOKIE_UNMAPPED)
2745 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2746 mmc_get_dma_dir(data));
2748 data->host_cookie = COOKIE_UNMAPPED;
2751 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2753 struct sdhci_host *host = mmc_priv(mmc);
2755 mrq->data->host_cookie = COOKIE_UNMAPPED;
2758 * No pre-mapping in the pre hook if we're using the bounce buffer;
2759 * for that we would need two bounce buffers, since one buffer is
2760 * in flight when this is called.
2762 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2763 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
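/*
 * Together, sdhci_pre_req() and sdhci_post_req() keep DMA map/unmap work out
 * of the request hot path: buffers pre-mapped here are tagged
 * COOKIE_PRE_MAPPED and are only unmapped in sdhci_post_req() once the core
 * has finished with the request.
 */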
2766 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2768 if (host->data_cmd) {
2769 host->data_cmd->error = err;
2770 sdhci_finish_mrq(host, host->data_cmd->mrq);
2773 if (host->cmd) {
2774 host->cmd->error = err;
2775 sdhci_finish_mrq(host, host->cmd->mrq);
2779 static void sdhci_card_event(struct mmc_host *mmc)
2781 struct sdhci_host *host = mmc_priv(mmc);
2782 unsigned long flags;
2783 int present;
2785 /* First check if client has provided their own card event */
2786 if (host->ops->card_event)
2787 host->ops->card_event(host);
2789 present = mmc->ops->get_cd(mmc);
2791 spin_lock_irqsave(&host->lock, flags);
2793 /* Check sdhci_has_requests() first in case we are runtime suspended */
2794 if (sdhci_has_requests(host) && !present) {
2795 pr_err("%s: Card removed during transfer!\n",
2796 mmc_hostname(host->mmc));
2797 pr_err("%s: Resetting controller.\n",
2798 mmc_hostname(host->mmc));
2800 sdhci_do_reset(host, SDHCI_RESET_CMD);
2801 sdhci_do_reset(host, SDHCI_RESET_DATA);
2803 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2806 spin_unlock_irqrestore(&host->lock, flags);
2809 static const struct mmc_host_ops sdhci_ops = {
2810 .request = sdhci_request,
2811 .post_req = sdhci_post_req,
2812 .pre_req = sdhci_pre_req,
2813 .set_ios = sdhci_set_ios,
2814 .get_cd = sdhci_get_cd,
2815 .get_ro = sdhci_get_ro,
2816 .hw_reset = sdhci_hw_reset,
2817 .enable_sdio_irq = sdhci_enable_sdio_irq,
2818 .ack_sdio_irq = sdhci_ack_sdio_irq,
2819 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2820 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2821 .execute_tuning = sdhci_execute_tuning,
2822 .card_event = sdhci_card_event,
2823 .card_busy = sdhci_card_busy,
2826 /*****************************************************************************\
2828 * Request done *
2830 \*****************************************************************************/
2832 static bool sdhci_request_done(struct sdhci_host *host)
2834 unsigned long flags;
2835 struct mmc_request *mrq;
2836 int i;
2838 spin_lock_irqsave(&host->lock, flags);
2840 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2841 mrq = host->mrqs_done[i];
2842 if (mrq)
2843 break;
2846 if (!mrq) {
2847 spin_unlock_irqrestore(&host->lock, flags);
2848 return true;
2852 * Always unmap the data buffers if they were mapped by
2853 * sdhci_prepare_data() whenever we finish with a request.
2854 * This avoids leaking DMA mappings on error.
2856 if (host->flags & SDHCI_REQ_USE_DMA) {
2857 struct mmc_data *data = mrq->data;
2859 if (host->use_external_dma && data &&
2860 (mrq->cmd->error || data->error)) {
2861 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
2863 host->mrqs_done[i] = NULL;
2864 spin_unlock_irqrestore(&host->lock, flags);
2865 dmaengine_terminate_sync(chan);
2866 spin_lock_irqsave(&host->lock, flags);
2867 sdhci_set_mrq_done(host, mrq);
2870 if (data && data->host_cookie == COOKIE_MAPPED) {
2871 if (host->bounce_buffer) {
2873 * On reads, copy the bounced data into the
2874 * sglist
2876 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2877 unsigned int length = data->bytes_xfered;
2879 if (length > host->bounce_buffer_size) {
2880 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2881 mmc_hostname(host->mmc),
2882 host->bounce_buffer_size,
2883 data->bytes_xfered);
2884 /* Cap it down and continue */
2885 length = host->bounce_buffer_size;
2887 dma_sync_single_for_cpu(
2888 host->mmc->parent,
2889 host->bounce_addr,
2890 host->bounce_buffer_size,
2891 DMA_FROM_DEVICE);
2892 sg_copy_from_buffer(data->sg,
2893 data->sg_len,
2894 host->bounce_buffer,
2895 length);
2896 } else {
2897 /* No copying, just switch ownership */
2898 dma_sync_single_for_cpu(
2899 host->mmc->parent,
2900 host->bounce_addr,
2901 host->bounce_buffer_size,
2902 mmc_get_dma_dir(data));
2904 } else {
2905 /* Unmap the raw data */
2906 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2907 data->sg_len,
2908 mmc_get_dma_dir(data));
2910 data->host_cookie = COOKIE_UNMAPPED;
2915 * The controller needs a reset of internal state machines
2916 * upon error conditions.
2918 if (sdhci_needs_reset(host, mrq)) {
2920 * Do not finish until command and data lines are available for
2921 * reset. Note there can only be one other mrq, so it cannot
2922 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2923 * would both be null.
2925 if (host->cmd || host->data_cmd) {
2926 spin_unlock_irqrestore(&host->lock, flags);
2927 return true;
2930 /* Some controllers need this kick or reset won't work here */
2931 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2932 /* This is to force an update */
2933 host->ops->set_clock(host, host->clock);
2935 /* Spec says we should do both at the same time, but Ricoh
2936 controllers do not like that. */
2937 sdhci_do_reset(host, SDHCI_RESET_CMD);
2938 sdhci_do_reset(host, SDHCI_RESET_DATA);
2940 host->pending_reset = false;
2943 host->mrqs_done[i] = NULL;
2945 spin_unlock_irqrestore(&host->lock, flags);
2947 mmc_request_done(host->mmc, mrq);
2949 return false;
2952 static void sdhci_complete_work(struct work_struct *work)
2954 struct sdhci_host *host = container_of(work, struct sdhci_host,
2955 complete_work);
2957 while (!sdhci_request_done(host))
2958 ;
2961 static void sdhci_timeout_timer(struct timer_list *t)
2963 struct sdhci_host *host;
2964 unsigned long flags;
2966 host = from_timer(host, t, timer);
2968 spin_lock_irqsave(&host->lock, flags);
2970 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2971 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2972 mmc_hostname(host->mmc));
2973 sdhci_dumpregs(host);
2975 host->cmd->error = -ETIMEDOUT;
2976 sdhci_finish_mrq(host, host->cmd->mrq);
2979 spin_unlock_irqrestore(&host->lock, flags);
2982 static void sdhci_timeout_data_timer(struct timer_list *t)
2984 struct sdhci_host *host;
2985 unsigned long flags;
2987 host = from_timer(host, t, data_timer);
2989 spin_lock_irqsave(&host->lock, flags);
2991 if (host->data || host->data_cmd ||
2992 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2993 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2994 mmc_hostname(host->mmc));
2995 sdhci_dumpregs(host);
2997 if (host->data) {
2998 host->data->error = -ETIMEDOUT;
2999 sdhci_finish_data(host);
3000 queue_work(host->complete_wq, &host->complete_work);
3001 } else if (host->data_cmd) {
3002 host->data_cmd->error = -ETIMEDOUT;
3003 sdhci_finish_mrq(host, host->data_cmd->mrq);
3004 } else {
3005 host->cmd->error = -ETIMEDOUT;
3006 sdhci_finish_mrq(host, host->cmd->mrq);
3010 spin_unlock_irqrestore(&host->lock, flags);
3013 /*****************************************************************************\
3015 * Interrupt handling *
3017 \*****************************************************************************/
3019 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3021 /* Handle auto-CMD12 error */
3022 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3023 struct mmc_request *mrq = host->data_cmd->mrq;
3024 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3025 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3026 SDHCI_INT_DATA_TIMEOUT :
3027 SDHCI_INT_DATA_CRC;
3029 /* Treat auto-CMD12 error the same as data error */
3030 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3031 *intmask_p |= data_err_bit;
3032 return;
3036 if (!host->cmd) {
3038 * SDHCI recovers from errors by resetting the cmd and data
3039 * circuits. Until that is done, there very well might be more
3040 * interrupts, so ignore them in that case.
3042 if (host->pending_reset)
3043 return;
3044 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3045 mmc_hostname(host->mmc), (unsigned)intmask);
3046 sdhci_dumpregs(host);
3047 return;
3050 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3051 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3052 if (intmask & SDHCI_INT_TIMEOUT)
3053 host->cmd->error = -ETIMEDOUT;
3054 else
3055 host->cmd->error = -EILSEQ;
3057 /* Treat data command CRC error the same as data CRC error */
3058 if (host->cmd->data &&
3059 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3060 SDHCI_INT_CRC) {
3061 host->cmd = NULL;
3062 *intmask_p |= SDHCI_INT_DATA_CRC;
3063 return;
3066 __sdhci_finish_mrq(host, host->cmd->mrq);
3067 return;
3070 /* Handle auto-CMD23 error */
3071 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3072 struct mmc_request *mrq = host->cmd->mrq;
3073 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3074 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3075 -ETIMEDOUT :
3076 -EILSEQ;
3078 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3079 mrq->sbc->error = err;
3080 __sdhci_finish_mrq(host, mrq);
3081 return;
3085 if (intmask & SDHCI_INT_RESPONSE)
3086 sdhci_finish_command(host);
3089 static void sdhci_adma_show_error(struct sdhci_host *host)
3091 void *desc = host->adma_table;
3092 dma_addr_t dma = host->adma_addr;
3094 sdhci_dumpregs(host);
3096 while (true) {
3097 struct sdhci_adma2_64_desc *dma_desc = desc;
3099 if (host->flags & SDHCI_USE_64_BIT_DMA)
3100 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3101 (unsigned long long)dma,
3102 le32_to_cpu(dma_desc->addr_hi),
3103 le32_to_cpu(dma_desc->addr_lo),
3104 le16_to_cpu(dma_desc->len),
3105 le16_to_cpu(dma_desc->cmd));
3106 else
3107 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3108 (unsigned long long)dma,
3109 le32_to_cpu(dma_desc->addr_lo),
3110 le16_to_cpu(dma_desc->len),
3111 le16_to_cpu(dma_desc->cmd));
3113 desc += host->desc_sz;
3114 dma += host->desc_sz;
3116 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3117 break;
3121 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3123 u32 command;
3125 /* CMD19 generates _only_ Buffer Read Ready interrupt */
3126 if (intmask & SDHCI_INT_DATA_AVAIL) {
3127 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3128 if (command == MMC_SEND_TUNING_BLOCK ||
3129 command == MMC_SEND_TUNING_BLOCK_HS200) {
3130 host->tuning_done = 1;
3131 wake_up(&host->buf_ready_int);
3132 return;
3136 if (!host->data) {
3137 struct mmc_command *data_cmd = host->data_cmd;
3140 * The "data complete" interrupt is also used to
3141 * indicate that a busy state has ended. See comment
3142 * above in sdhci_cmd_irq().
3144 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3145 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3146 host->data_cmd = NULL;
3147 data_cmd->error = -ETIMEDOUT;
3148 __sdhci_finish_mrq(host, data_cmd->mrq);
3149 return;
3151 if (intmask & SDHCI_INT_DATA_END) {
3152 host->data_cmd = NULL;
3154 * Some cards signal the busy-end interrupt
3155 * before the command has completed, so make
3156 * sure we do things in the proper order.
3158 if (host->cmd == data_cmd)
3159 return;
3161 __sdhci_finish_mrq(host, data_cmd->mrq);
3162 return;
3167 * SDHCI recovers from errors by resetting the cmd and data
3168 * circuits. Until that is done, there very well might be more
3169 * interrupts, so ignore them in that case.
3171 if (host->pending_reset)
3172 return;
3174 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3175 mmc_hostname(host->mmc), (unsigned)intmask);
3176 sdhci_dumpregs(host);
3178 return;
3181 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3182 host->data->error = -ETIMEDOUT;
3183 else if (intmask & SDHCI_INT_DATA_END_BIT)
3184 host->data->error = -EILSEQ;
3185 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3186 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3187 != MMC_BUS_TEST_R)
3188 host->data->error = -EILSEQ;
3189 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3190 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3191 intmask);
3192 sdhci_adma_show_error(host);
3193 host->data->error = -EIO;
3194 if (host->ops->adma_workaround)
3195 host->ops->adma_workaround(host, intmask);
3198 if (host->data->error)
3199 sdhci_finish_data(host);
3200 else {
3201 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3202 sdhci_transfer_pio(host);
3205 * We currently don't do anything fancy with DMA
3206 * boundaries, but as we can't disable the feature
3207 * we need to at least restart the transfer.
3209 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3210 * should return a valid address to continue from, but as
3211 * some controllers are faulty, don't trust them.
3213 if (intmask & SDHCI_INT_DMA_END) {
3214 dma_addr_t dmastart, dmanow;
3216 dmastart = sdhci_sdma_address(host);
3217 dmanow = dmastart + host->data->bytes_xfered;
3219 * Force update to the next DMA block boundary.
3221 dmanow = (dmanow &
3222 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3223 SDHCI_DEFAULT_BOUNDARY_SIZE;
3224 host->data->bytes_xfered = dmanow - dmastart;
3225 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3226 &dmastart, host->data->bytes_xfered, &dmanow);
3227 sdhci_set_sdma_addr(host, dmanow);
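/*
 * Worked example (illustrative addresses): with the default 512 KiB
 * boundary, a transfer that started at DMA address 0x00234000 is restarted
 * at the next boundary, 0x00280000, and bytes_xfered is updated to 0x4c000
 * so the rest of the transfer continues from there.
 */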
3230 if (intmask & SDHCI_INT_DATA_END) {
3231 if (host->cmd == host->data_cmd) {
3233 * Data managed to finish before the
3234 * command completed. Make sure we do
3235 * things in the proper order.
3237 host->data_early = 1;
3238 } else {
3239 sdhci_finish_data(host);
3245 static inline bool sdhci_defer_done(struct sdhci_host *host,
3246 struct mmc_request *mrq)
3248 struct mmc_data *data = mrq->data;
3250 return host->pending_reset ||
3251 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3252 data->host_cookie == COOKIE_MAPPED);
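/*
 * Completion is handed off to the threaded handler whenever a reset is still
 * pending or DMA buffers mapped by the driver still need to be unmapped;
 * otherwise the request can be completed directly from the hard-irq handler.
 */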
3255 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3257 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3258 irqreturn_t result = IRQ_NONE;
3259 struct sdhci_host *host = dev_id;
3260 u32 intmask, mask, unexpected = 0;
3261 int max_loops = 16;
3262 int i;
3264 spin_lock(&host->lock);
3266 if (host->runtime_suspended) {
3267 spin_unlock(&host->lock);
3268 return IRQ_NONE;
3271 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3272 if (!intmask || intmask == 0xffffffff) {
3273 result = IRQ_NONE;
3274 goto out;
3277 do {
3278 DBG("IRQ status 0x%08x\n", intmask);
3280 if (host->ops->irq) {
3281 intmask = host->ops->irq(host, intmask);
3282 if (!intmask)
3283 goto cont;
3286 /* Clear selected interrupts. */
3287 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3288 SDHCI_INT_BUS_POWER);
3289 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3291 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3292 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3293 SDHCI_CARD_PRESENT;
3296 * There is an observation on i.MX eSDHC: the INSERT
3297 * bit will immediately be set again when it gets
3298 * cleared if a card is inserted. We have to mask
3299 * the irq to prevent an interrupt storm which would
3300 * freeze the system. The REMOVE bit suffers from the
3301 * same situation.
3303 * More testing is needed here to ensure it works
3304 * for other platforms though.
3306 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3307 SDHCI_INT_CARD_REMOVE);
3308 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3309 SDHCI_INT_CARD_INSERT;
3310 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3311 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3313 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3314 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3316 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3317 SDHCI_INT_CARD_REMOVE);
3318 result = IRQ_WAKE_THREAD;
3321 if (intmask & SDHCI_INT_CMD_MASK)
3322 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3324 if (intmask & SDHCI_INT_DATA_MASK)
3325 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3327 if (intmask & SDHCI_INT_BUS_POWER)
3328 pr_err("%s: Card is consuming too much power!\n",
3329 mmc_hostname(host->mmc));
3331 if (intmask & SDHCI_INT_RETUNE)
3332 mmc_retune_needed(host->mmc);
3334 if ((intmask & SDHCI_INT_CARD_INT) &&
3335 (host->ier & SDHCI_INT_CARD_INT)) {
3336 sdhci_enable_sdio_irq_nolock(host, false);
3337 sdio_signal_irq(host->mmc);
3340 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3341 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3342 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3343 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3345 if (intmask) {
3346 unexpected |= intmask;
3347 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3349 cont:
3350 if (result == IRQ_NONE)
3351 result = IRQ_HANDLED;
3353 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3354 } while (intmask && --max_loops);
3356 /* Determine if mrqs can be completed immediately */
3357 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3358 struct mmc_request *mrq = host->mrqs_done[i];
3360 if (!mrq)
3361 continue;
3363 if (sdhci_defer_done(host, mrq)) {
3364 result = IRQ_WAKE_THREAD;
3365 } else {
3366 mrqs_done[i] = mrq;
3367 host->mrqs_done[i] = NULL;
3370 out:
3371 spin_unlock(&host->lock);
3373 /* Process mrqs ready for immediate completion */
3374 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3375 if (mrqs_done[i])
3376 mmc_request_done(host->mmc, mrqs_done[i]);
3379 if (unexpected) {
3380 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3381 mmc_hostname(host->mmc), unexpected);
3382 sdhci_dumpregs(host);
3385 return result;
3388 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3390 struct sdhci_host *host = dev_id;
3391 unsigned long flags;
3392 u32 isr;
3394 while (!sdhci_request_done(host))
3395 ;
3397 spin_lock_irqsave(&host->lock, flags);
3398 isr = host->thread_isr;
3399 host->thread_isr = 0;
3400 spin_unlock_irqrestore(&host->lock, flags);
3402 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3403 struct mmc_host *mmc = host->mmc;
3405 mmc->ops->card_event(mmc);
3406 mmc_detect_change(mmc, msecs_to_jiffies(200));
3409 return IRQ_HANDLED;
3412 /*****************************************************************************\
3414 * Suspend/resume *
3416 \*****************************************************************************/
3418 #ifdef CONFIG_PM
3420 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3422 return mmc_card_is_removable(host->mmc) &&
3423 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3424 !mmc_can_gpio_cd(host->mmc);
3428 * To enable wakeup events, the corresponding events have to be enabled in
3429 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3430 * Table' in the SD Host Controller Standard Specification.
3431 * It is useless to restore SDHCI_INT_ENABLE state in
3432 * sdhci_disable_irq_wakeups() since it will be set by
3433 * sdhci_enable_card_detection() or sdhci_init().
3435 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3437 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3438 SDHCI_WAKE_ON_INT;
3439 u32 irq_val = 0;
3440 u8 wake_val = 0;
3441 u8 val;
3443 if (sdhci_cd_irq_can_wakeup(host)) {
3444 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3445 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3448 if (mmc_card_wake_sdio_irq(host->mmc)) {
3449 wake_val |= SDHCI_WAKE_ON_INT;
3450 irq_val |= SDHCI_INT_CARD_INT;
3453 if (!irq_val)
3454 return false;
3456 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3457 val &= ~mask;
3458 val |= wake_val;
3459 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3461 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3463 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3465 return host->irq_wake_enabled;
3468 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3470 u8 val;
3471 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3472 | SDHCI_WAKE_ON_INT;
3474 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3475 val &= ~mask;
3476 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3478 disable_irq_wake(host->irq);
3480 host->irq_wake_enabled = false;
3483 int sdhci_suspend_host(struct sdhci_host *host)
3485 sdhci_disable_card_detection(host);
3487 mmc_retune_timer_stop(host->mmc);
3489 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3490 !sdhci_enable_irq_wakeups(host)) {
3491 host->ier = 0;
3492 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3493 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3494 free_irq(host->irq, host);
3497 return 0;
3500 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3502 int sdhci_resume_host(struct sdhci_host *host)
3504 struct mmc_host *mmc = host->mmc;
3505 int ret = 0;
3507 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3508 if (host->ops->enable_dma)
3509 host->ops->enable_dma(host);
3512 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3513 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3514 /* Card keeps power but host controller does not */
3515 sdhci_init(host, 0);
3516 host->pwr = 0;
3517 host->clock = 0;
3518 mmc->ops->set_ios(mmc, &mmc->ios);
3519 } else {
3520 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3523 if (host->irq_wake_enabled) {
3524 sdhci_disable_irq_wakeups(host);
3525 } else {
3526 ret = request_threaded_irq(host->irq, sdhci_irq,
3527 sdhci_thread_irq, IRQF_SHARED,
3528 mmc_hostname(host->mmc), host);
3529 if (ret)
3530 return ret;
3533 sdhci_enable_card_detection(host);
3535 return ret;
3538 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3540 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3542 unsigned long flags;
3544 mmc_retune_timer_stop(host->mmc);
3546 spin_lock_irqsave(&host->lock, flags);
3547 host->ier &= SDHCI_INT_CARD_INT;
3548 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3549 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3550 spin_unlock_irqrestore(&host->lock, flags);
3552 synchronize_hardirq(host->irq);
3554 spin_lock_irqsave(&host->lock, flags);
3555 host->runtime_suspended = true;
3556 spin_unlock_irqrestore(&host->lock, flags);
3558 return 0;
3560 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3562 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3564 struct mmc_host *mmc = host->mmc;
3565 unsigned long flags;
3566 int host_flags = host->flags;
3568 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3569 if (host->ops->enable_dma)
3570 host->ops->enable_dma(host);
3573 sdhci_init(host, soft_reset);
3575 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3576 mmc->ios.power_mode != MMC_POWER_OFF) {
3577 /* Force clock and power re-program */
3578 host->pwr = 0;
3579 host->clock = 0;
3580 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3581 mmc->ops->set_ios(mmc, &mmc->ios);
3583 if ((host_flags & SDHCI_PV_ENABLED) &&
3584 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3585 spin_lock_irqsave(&host->lock, flags);
3586 sdhci_enable_preset_value(host, true);
3587 spin_unlock_irqrestore(&host->lock, flags);
3590 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3591 mmc->ops->hs400_enhanced_strobe)
3592 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3595 spin_lock_irqsave(&host->lock, flags);
3597 host->runtime_suspended = false;
3599 /* Enable SDIO IRQ */
3600 if (sdio_irq_claimed(mmc))
3601 sdhci_enable_sdio_irq_nolock(host, true);
3603 /* Enable Card Detection */
3604 sdhci_enable_card_detection(host);
3606 spin_unlock_irqrestore(&host->lock, flags);
3608 return 0;
3610 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3612 #endif /* CONFIG_PM */
3614 /*****************************************************************************\
3616 * Command Queue Engine (CQE) helpers *
3618 \*****************************************************************************/
3620 void sdhci_cqe_enable(struct mmc_host *mmc)
3622 struct sdhci_host *host = mmc_priv(mmc);
3623 unsigned long flags;
3624 u8 ctrl;
3626 spin_lock_irqsave(&host->lock, flags);
3628 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3629 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3631 * Hosts from v4.10 support the ADMA3 DMA type.
3632 * ADMA3 uses integrated descriptors, which is better suited to
3633 * command queuing since command and transfer descriptors are fetched together.
3635 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3636 ctrl |= SDHCI_CTRL_ADMA3;
3637 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3638 ctrl |= SDHCI_CTRL_ADMA64;
3639 else
3640 ctrl |= SDHCI_CTRL_ADMA32;
3641 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3643 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3644 SDHCI_BLOCK_SIZE);
3646 /* Set maximum timeout */
3647 sdhci_set_timeout(host, NULL);
3649 host->ier = host->cqe_ier;
3651 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3652 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3654 host->cqe_on = true;
3656 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3657 mmc_hostname(mmc), host->ier,
3658 sdhci_readl(host, SDHCI_INT_STATUS));
3660 spin_unlock_irqrestore(&host->lock, flags);
3662 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3664 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3666 struct sdhci_host *host = mmc_priv(mmc);
3667 unsigned long flags;
3669 spin_lock_irqsave(&host->lock, flags);
3671 sdhci_set_default_irqs(host);
3673 host->cqe_on = false;
3675 if (recovery) {
3676 sdhci_do_reset(host, SDHCI_RESET_CMD);
3677 sdhci_do_reset(host, SDHCI_RESET_DATA);
3680 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3681 mmc_hostname(mmc), host->ier,
3682 sdhci_readl(host, SDHCI_INT_STATUS));
3684 spin_unlock_irqrestore(&host->lock, flags);
3686 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3688 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3689 int *data_error)
3691 u32 mask;
3693 if (!host->cqe_on)
3694 return false;
3696 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3697 *cmd_error = -EILSEQ;
3698 else if (intmask & SDHCI_INT_TIMEOUT)
3699 *cmd_error = -ETIMEDOUT;
3700 else
3701 *cmd_error = 0;
3703 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3704 *data_error = -EILSEQ;
3705 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3706 *data_error = -ETIMEDOUT;
3707 else if (intmask & SDHCI_INT_ADMA_ERROR)
3708 *data_error = -EIO;
3709 else
3710 *data_error = 0;
3712 /* Clear selected interrupts. */
3713 mask = intmask & host->cqe_ier;
3714 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3716 if (intmask & SDHCI_INT_BUS_POWER)
3717 pr_err("%s: Card is consuming too much power!\n",
3718 mmc_hostname(host->mmc));
3720 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3721 if (intmask) {
3722 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3723 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3724 mmc_hostname(host->mmc), intmask);
3725 sdhci_dumpregs(host);
3728 return true;
3730 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3732 /*****************************************************************************\
3734 * Device allocation/registration *
3736 \*****************************************************************************/
3738 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3739 size_t priv_size)
3741 struct mmc_host *mmc;
3742 struct sdhci_host *host;
3744 WARN_ON(dev == NULL);
3746 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3747 if (!mmc)
3748 return ERR_PTR(-ENOMEM);
3750 host = mmc_priv(mmc);
3751 host->mmc = mmc;
3752 host->mmc_host_ops = sdhci_ops;
3753 mmc->ops = &host->mmc_host_ops;
3755 host->flags = SDHCI_SIGNALING_330;
3757 host->cqe_ier = SDHCI_CQE_INT_MASK;
3758 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3760 host->tuning_delay = -1;
3761 host->tuning_loop_count = MAX_TUNING_LOOP;
3763 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3766 * The DMA table descriptor count is calculated as the maximum
3767 * number of segments times 2, to allow for an alignment
3768 * descriptor for each segment, plus 1 for a nop end descriptor.
3770 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
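/*
 * Illustrative sizing: SDHCI_MAX_SEGS is currently 128, so this works out to
 * 128 * 2 + 1 = 257 descriptor slots per ADMA table.
 */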
3772 return host;
3775 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3777 static int sdhci_set_dma_mask(struct sdhci_host *host)
3779 struct mmc_host *mmc = host->mmc;
3780 struct device *dev = mmc_dev(mmc);
3781 int ret = -EINVAL;
3783 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3784 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3786 /* Try 64-bit mask if hardware is capable of it */
3787 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3788 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3789 if (ret) {
3790 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3791 mmc_hostname(mmc));
3792 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3796 /* 32-bit mask as default & fallback */
3797 if (ret) {
3798 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3799 if (ret)
3800 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3801 mmc_hostname(mmc));
3804 return ret;
3807 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3808 const u32 *caps, const u32 *caps1)
3810 u16 v;
3811 u64 dt_caps_mask = 0;
3812 u64 dt_caps = 0;
3814 if (host->read_caps)
3815 return;
3817 host->read_caps = true;
3819 if (debug_quirks)
3820 host->quirks = debug_quirks;
3822 if (debug_quirks2)
3823 host->quirks2 = debug_quirks2;
3825 sdhci_do_reset(host, SDHCI_RESET_ALL);
3827 if (host->v4_mode)
3828 sdhci_do_enable_v4_mode(host);
3830 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3831 "sdhci-caps-mask", &dt_caps_mask);
3832 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3833 "sdhci-caps", &dt_caps);
3835 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3836 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3838 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3839 return;
3841 if (caps) {
3842 host->caps = *caps;
3843 } else {
3844 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3845 host->caps &= ~lower_32_bits(dt_caps_mask);
3846 host->caps |= lower_32_bits(dt_caps);
3849 if (host->version < SDHCI_SPEC_300)
3850 return;
3852 if (caps1) {
3853 host->caps1 = *caps1;
3854 } else {
3855 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3856 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3857 host->caps1 |= upper_32_bits(dt_caps);
3860 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3862 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3864 struct mmc_host *mmc = host->mmc;
3865 unsigned int max_blocks;
3866 unsigned int bounce_size;
3867 int ret;
3870 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3871 * has diminishing returns; this is probably because SD/MMC
3872 * cards are usually optimized to handle requests of this size.
3874 bounce_size = SZ_64K;
3876 * Adjust downwards to maximum request size if this is less
3877 * than our segment size, else hammer down the maximum
3878 * request size to the maximum buffer size.
3880 if (mmc->max_req_size < bounce_size)
3881 bounce_size = mmc->max_req_size;
3882 max_blocks = bounce_size / 512;
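/*
 * Illustrative sizing: with the full 64 KiB bounce buffer this gives
 * max_blocks = 65536 / 512 = 128, i.e. up to 128 512-byte blocks can be
 * gathered into the single bounce buffer per request.
 */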
3885 * When we support just one segment, we can get significant
3886 * speedups with the help of a bounce buffer that groups scattered
3887 * reads/writes together.
3889 host->bounce_buffer = devm_kmalloc(mmc->parent,
3890 bounce_size,
3891 GFP_KERNEL);
3892 if (!host->bounce_buffer) {
3893 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3894 mmc_hostname(mmc),
3895 bounce_size);
3897 * Exiting with zero here makes sure we proceed with
3898 * mmc->max_segs == 1.
3900 return;
3903 host->bounce_addr = dma_map_single(mmc->parent,
3904 host->bounce_buffer,
3905 bounce_size,
3906 DMA_BIDIRECTIONAL);
3907 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3908 if (ret)
3909 /* Again fall back to max_segs == 1 */
3910 return;
3911 host->bounce_buffer_size = bounce_size;
3913 /* Lie about this since we're bouncing */
3914 mmc->max_segs = max_blocks;
3915 mmc->max_seg_size = bounce_size;
3916 mmc->max_req_size = bounce_size;
3918 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3919 mmc_hostname(mmc), max_blocks, bounce_size);
3922 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3925 * According to SD Host Controller spec v4.10, bit[27] added from
3926 * version 4.10 in Capabilities Register is used as 64-bit System
3927 * Address support for V4 mode.
3929 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3930 return host->caps & SDHCI_CAN_64BIT_V4;
3932 return host->caps & SDHCI_CAN_64BIT;
3935 int sdhci_setup_host(struct sdhci_host *host)
3937 struct mmc_host *mmc;
3938 u32 max_current_caps;
3939 unsigned int ocr_avail;
3940 unsigned int override_timeout_clk;
3941 u32 max_clk;
3942 int ret;
3944 WARN_ON(host == NULL);
3945 if (host == NULL)
3946 return -EINVAL;
3948 mmc = host->mmc;
3951 * If there are external regulators, get them. Note this must be done
3952 * early before resetting the host and reading the capabilities so that
3953 * the host can take the appropriate action if regulators are not
3954 * available.
3956 ret = mmc_regulator_get_supply(mmc);
3957 if (ret)
3958 return ret;
3960 DBG("Version: 0x%08x | Present: 0x%08x\n",
3961 sdhci_readw(host, SDHCI_HOST_VERSION),
3962 sdhci_readl(host, SDHCI_PRESENT_STATE));
3963 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3964 sdhci_readl(host, SDHCI_CAPABILITIES),
3965 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3967 sdhci_read_caps(host);
3969 override_timeout_clk = host->timeout_clk;
3971 if (host->version > SDHCI_SPEC_420) {
3972 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3973 mmc_hostname(mmc), host->version);
3976 if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3977 mmc->caps2 &= ~MMC_CAP2_CQE;
3979 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3980 host->flags |= SDHCI_USE_SDMA;
3981 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3982 DBG("Controller doesn't have SDMA capability\n");
3983 else
3984 host->flags |= SDHCI_USE_SDMA;
3986 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3987 (host->flags & SDHCI_USE_SDMA)) {
3988 DBG("Disabling DMA as it is marked broken\n");
3989 host->flags &= ~SDHCI_USE_SDMA;
3992 if ((host->version >= SDHCI_SPEC_200) &&
3993 (host->caps & SDHCI_CAN_DO_ADMA2))
3994 host->flags |= SDHCI_USE_ADMA;
3996 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3997 (host->flags & SDHCI_USE_ADMA)) {
3998 DBG("Disabling ADMA as it is marked broken\n");
3999 host->flags &= ~SDHCI_USE_ADMA;
4002 if (sdhci_can_64bit_dma(host))
4003 host->flags |= SDHCI_USE_64_BIT_DMA;
4005 if (host->use_external_dma) {
4006 ret = sdhci_external_dma_init(host);
4007 if (ret == -EPROBE_DEFER)
4008 goto unreg;
4010 * Fall back to using the DMA/PIO integrated in standard SDHCI
4011 * instead of external DMA devices.
4013 else if (ret)
4014 sdhci_switch_external_dma(host, false);
4015 /* Disable internal DMA sources */
4016 else
4017 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4020 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4021 if (host->ops->set_dma_mask)
4022 ret = host->ops->set_dma_mask(host);
4023 else
4024 ret = sdhci_set_dma_mask(host);
4026 if (!ret && host->ops->enable_dma)
4027 ret = host->ops->enable_dma(host);
4029 if (ret) {
4030 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4031 mmc_hostname(mmc));
4032 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4034 ret = 0;
4038 /* SDMA does not support 64-bit DMA if v4 mode not set */
4039 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4040 host->flags &= ~SDHCI_USE_SDMA;
4042 if (host->flags & SDHCI_USE_ADMA) {
4043 dma_addr_t dma;
4044 void *buf;
4046 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4047 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4048 else if (!host->alloc_desc_sz)
4049 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4051 host->desc_sz = host->alloc_desc_sz;
4052 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4054 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4056 * Use zalloc to zero the reserved high 32 bits of 128-bit
4057 * descriptors so that they never need to be written.
4059 buf = dma_alloc_coherent(mmc_dev(mmc),
4060 host->align_buffer_sz + host->adma_table_sz,
4061 &dma, GFP_KERNEL);
4062 if (!buf) {
4063 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4064 mmc_hostname(mmc));
4065 host->flags &= ~SDHCI_USE_ADMA;
4066 } else if ((dma + host->align_buffer_sz) &
4067 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4068 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4069 mmc_hostname(mmc));
4070 host->flags &= ~SDHCI_USE_ADMA;
4071 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4072 host->adma_table_sz, buf, dma);
4073 } else {
4074 host->align_buffer = buf;
4075 host->align_addr = dma;
4077 host->adma_table = buf + host->align_buffer_sz;
4078 host->adma_addr = dma + host->align_buffer_sz;
4083 * If we use DMA, then it's up to the caller to set the DMA
4084 * mask, but PIO does not need the hw shim so we set a new
4085 * mask here in that case.
4087 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4088 host->dma_mask = DMA_BIT_MASK(64);
4089 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4092 if (host->version >= SDHCI_SPEC_300)
4093 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
4094 >> SDHCI_CLOCK_BASE_SHIFT;
4095 else
4096 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
4097 >> SDHCI_CLOCK_BASE_SHIFT;
4099 host->max_clk *= 1000000;
4100 if (host->max_clk == 0 || host->quirks &
4101 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4102 if (!host->ops->get_max_clock) {
4103 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4104 mmc_hostname(mmc));
4105 ret = -ENODEV;
4106 goto undma;
4108 host->max_clk = host->ops->get_max_clock(host);
4112 * In case of Host Controller v3.00, find out whether clock
4113 * multiplier is supported.
4115 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
4116 SDHCI_CLOCK_MUL_SHIFT;
4119 * If the value in Clock Multiplier is 0, programmable
4120 * clock mode is not supported; otherwise the actual clock
4121 * multiplier is one more than the value of Clock Multiplier
4122 * in the Capabilities Register.
4124 if (host->clk_mul)
4125 host->clk_mul += 1;
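/*
 * Worked example (illustrative values): a Clock Multiplier field of 9 means
 * an effective multiplier of 10, so a 20 MHz base clock can supply up to
 * 200 MHz in programmable clock mode.
 */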
4128 * Set host parameters.
4130 max_clk = host->max_clk;
4132 if (host->ops->get_min_clock)
4133 mmc->f_min = host->ops->get_min_clock(host);
4134 else if (host->version >= SDHCI_SPEC_300) {
4135 if (host->clk_mul)
4136 max_clk = host->max_clk * host->clk_mul;
4138 * Divided Clock Mode minimum clock rate is always less than
4139 * Programmable Clock Mode minimum clock rate.
4141 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4142 } else
4143 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4145 if (!mmc->f_max || mmc->f_max > max_clk)
4146 mmc->f_max = max_clk;
4148 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4149 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
4150 SDHCI_TIMEOUT_CLK_SHIFT;
4152 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4153 host->timeout_clk *= 1000;
4155 if (host->timeout_clk == 0) {
4156 if (!host->ops->get_timeout_clock) {
4157 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4158 mmc_hostname(mmc));
4159 ret = -ENODEV;
4160 goto undma;
4163 host->timeout_clk =
4164 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4165 1000);
4168 if (override_timeout_clk)
4169 host->timeout_clk = override_timeout_clk;
4171 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4172 host->ops->get_max_timeout_count(host) : 1 << 27;
4173 mmc->max_busy_timeout /= host->timeout_clk;
4176 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4177 !host->ops->get_max_timeout_count)
4178 mmc->max_busy_timeout = 0;
4180 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
4181 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4183 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4184 host->flags |= SDHCI_AUTO_CMD12;
4187 * For v3 mode, Auto-CMD23 only works in ADMA or PIO.
4188 * For v4 mode, SDMA may use Auto-CMD23 as well.
4190 if ((host->version >= SDHCI_SPEC_300) &&
4191 ((host->flags & SDHCI_USE_ADMA) ||
4192 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4193 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4194 host->flags |= SDHCI_AUTO_CMD23;
4195 DBG("Auto-CMD23 available\n");
4196 } else {
4197 DBG("Auto-CMD23 unavailable\n");
4201 * A controller may support 8-bit width, but the board itself
4202 * might not have the pins brought out. Boards that support
4203 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4204 * their platform code before calling sdhci_add_host(), and we
4205 * won't assume 8-bit width for hosts without that CAP.
4207 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4208 mmc->caps |= MMC_CAP_4_BIT_DATA;
4210 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4211 mmc->caps &= ~MMC_CAP_CMD23;
4213 if (host->caps & SDHCI_CAN_DO_HISPD)
4214 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4216 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4217 mmc_card_is_removable(mmc) &&
4218 mmc_gpio_get_cd(host->mmc) < 0)
4219 mmc->caps |= MMC_CAP_NEEDS_POLL;
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}
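	/*
	 * From here on IS_ERR(mmc->supply.vqmmc) means "no usable vqmmc";
	 * voltage-dependent checks below (e.g. the 1.2V HS modes) treat that
	 * as the corresponding signalling level being unavailable.
	 */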
	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8v then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8v
		 * connected to the IO lines. (Applicable for other modes in
		 * 1.8v)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;
	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;
	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
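	/*
	 * Worked example: a vmmc regulator reporting a 1A limit (1000000uA)
	 * becomes 1000 / 4 = 250 in each per-voltage byte above, which scales
	 * back to the same 1000mA when multiplied by
	 * SDHCI_MAX_CURRENT_MULTIPLIER below.
	 */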
	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any support voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}
	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;
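	/*
	 * The SDHCI_SIGNALING_* flags record which I/O voltages this host may
	 * switch to; sdhci_start_signal_voltage_switch() rejects a voltage
	 * whose flag is not set here.
	 */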
	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
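		/*
		 * With a single SDMA segment the whole request may have to be
		 * bounced through swiotlb, whose largest contiguous
		 * allocation is IO_TLB_SEGSIZE slots of 1 << IO_TLB_SHIFT
		 * bytes (128 * 2KiB = 256KiB), so cap the request size to
		 * match.
		 */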
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
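	/*
	 * The capability field encodes the maximum block length as
	 * 0 -> 512, 1 -> 1024 and 2 -> 2048 bytes, hence the shift above.
	 */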
	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);
	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
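/*
 * Platform glue that needs to adjust host state between capability parsing
 * and registration typically uses the split flow rather than
 * sdhci_add_host(), roughly (sketch only, error paths abbreviated):
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *	... tweak host->mmc->caps, DMA setup, etc ...
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 */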
int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		host->use_external_dma ? "External DMA" :
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	if (host->use_external_dma)
		sdhci_external_dma_release(host);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");