/*
 * Source provenance: Linux, merge tag 'block-5.11-2021-01-10'
 * (git://git.kernel.dk/linux-block)
 * File: drivers/mmc/host/sdhci-of-esdhc.c
 * blob ab5ab969f711deecdbca38e27c758b2f2b416bd4
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Freescale eSDHC controller driver.
5 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6 * Copyright (c) 2009 MontaVista Software, Inc.
7 * Copyright 2020 NXP
9 * Authors: Xiaobo Xie <X.Xie@freescale.com>
10 * Anton Vorontsov <avorontsov@ru.mvista.com>
13 #include <linux/err.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_address.h>
17 #include <linux/delay.h>
18 #include <linux/module.h>
19 #include <linux/sys_soc.h>
20 #include <linux/clk.h>
21 #include <linux/ktime.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/iopoll.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/mmc.h>
26 #include "sdhci-pltfm.h"
27 #include "sdhci-esdhc.h"
29 #define VENDOR_V_22 0x12
30 #define VENDOR_V_23 0x13
32 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
34 struct esdhc_clk_fixup {
35 const unsigned int sd_dflt_max_clk;
36 const unsigned int max_clk[MMC_TIMING_NUM];
39 static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
40 .sd_dflt_max_clk = 25000000,
41 .max_clk[MMC_TIMING_MMC_HS] = 46500000,
42 .max_clk[MMC_TIMING_SD_HS] = 46500000,
45 static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
46 .sd_dflt_max_clk = 25000000,
47 .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
48 .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
51 static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
52 .sd_dflt_max_clk = 25000000,
53 .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
54 .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
57 static const struct esdhc_clk_fixup p1010_esdhc_clk = {
58 .sd_dflt_max_clk = 20000000,
59 .max_clk[MMC_TIMING_LEGACY] = 20000000,
60 .max_clk[MMC_TIMING_MMC_HS] = 42000000,
61 .max_clk[MMC_TIMING_SD_HS] = 40000000,
64 static const struct of_device_id sdhci_esdhc_of_match[] = {
65 { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
66 { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
67 { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
68 { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
69 { .compatible = "fsl,mpc8379-esdhc" },
70 { .compatible = "fsl,mpc8536-esdhc" },
71 { .compatible = "fsl,esdhc" },
72 { }
74 MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
76 struct sdhci_esdhc {
77 u8 vendor_ver;
78 u8 spec_ver;
79 bool quirk_incorrect_hostver;
80 bool quirk_limited_clk_division;
81 bool quirk_unreliable_pulse_detection;
82 bool quirk_tuning_erratum_type1;
83 bool quirk_tuning_erratum_type2;
84 bool quirk_ignore_data_inhibit;
85 bool quirk_delay_before_data_reset;
86 bool quirk_trans_complete_erratum;
87 bool in_sw_tuning;
88 unsigned int peripheral_clock;
89 const struct esdhc_clk_fixup *clk_fixup;
90 u32 div_ratio;
93 /**
94 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
95 * to make it compatible with SD spec.
97 * @host: pointer to sdhci_host
98 * @spec_reg: SD spec register address
99 * @value: 32bit eSDHC register value on spec_reg address
101 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
102 * registers are 32 bits. There are differences in register size, register
103 * address, register function, bit position and function between eSDHC spec
104 * and SD spec.
106 * Return a fixed up register value
108 static u32 esdhc_readl_fixup(struct sdhci_host *host,
109 int spec_reg, u32 value)
111 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
112 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
113 u32 ret;
116 * The bit of ADMA flag in eSDHC is not compatible with standard
117 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
118 * supported by eSDHC.
119 * And for many FSL eSDHC controller, the reset value of field
120 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
121 * only these vendor version is greater than 2.2/0x12 support ADMA.
123 if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
124 if (esdhc->vendor_ver > VENDOR_V_22) {
125 ret = value | SDHCI_CAN_DO_ADMA2;
126 return ret;
130 * The DAT[3:0] line signal levels and the CMD line signal level are
131 * not compatible with standard SDHC register. The line signal levels
132 * DAT[7:0] are at bits 31:24 and the command line signal level is at
133 * bit 23. All other bits are the same as in the standard SDHC
134 * register.
136 if (spec_reg == SDHCI_PRESENT_STATE) {
137 ret = value & 0x000fffff;
138 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
139 ret |= (value << 1) & SDHCI_CMD_LVL;
140 return ret;
144 * DTS properties of mmc host are used to enable each speed mode
145 * according to soc and board capability. So clean up
146 * SDR50/SDR104/DDR50 support bits here.
148 if (spec_reg == SDHCI_CAPABILITIES_1) {
149 ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
150 SDHCI_SUPPORT_DDR50);
151 return ret;
155 * Some controllers have unreliable Data Line Active
156 * bit for commands with busy signal. This affects
157 * Command Inhibit (data) bit. Just ignore it since
158 * MMC core driver has already polled card status
159 * with CMD13 after any command with busy siganl.
161 if ((spec_reg == SDHCI_PRESENT_STATE) &&
162 (esdhc->quirk_ignore_data_inhibit == true)) {
163 ret = value & ~SDHCI_DATA_INHIBIT;
164 return ret;
167 ret = value;
168 return ret;
171 static u16 esdhc_readw_fixup(struct sdhci_host *host,
172 int spec_reg, u32 value)
174 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
175 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
176 u16 ret;
177 int shift = (spec_reg & 0x2) * 8;
179 if (spec_reg == SDHCI_TRANSFER_MODE)
180 return pltfm_host->xfer_mode_shadow;
182 if (spec_reg == SDHCI_HOST_VERSION)
183 ret = value & 0xffff;
184 else
185 ret = (value >> shift) & 0xffff;
186 /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
187 * vendor version and spec version information.
189 if ((spec_reg == SDHCI_HOST_VERSION) &&
190 (esdhc->quirk_incorrect_hostver))
191 ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
192 return ret;
195 static u8 esdhc_readb_fixup(struct sdhci_host *host,
196 int spec_reg, u32 value)
198 u8 ret;
199 u8 dma_bits;
200 int shift = (spec_reg & 0x3) * 8;
202 ret = (value >> shift) & 0xff;
205 * "DMA select" locates at offset 0x28 in SD specification, but on
206 * P5020 or P3041, it locates at 0x29.
208 if (spec_reg == SDHCI_HOST_CONTROL) {
209 /* DMA select is 22,23 bits in Protocol Control Register */
210 dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
211 /* fixup the result */
212 ret &= ~SDHCI_CTRL_DMA_MASK;
213 ret |= dma_bits;
215 return ret;
219 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
220 * written into eSDHC register.
222 * @host: pointer to sdhci_host
223 * @spec_reg: SD spec register address
224 * @value: 8/16/32bit SD spec register value that would be written
225 * @old_value: 32bit eSDHC register value on spec_reg address
227 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
228 * registers are 32 bits. There are differences in register size, register
229 * address, register function, bit position and function between eSDHC spec
230 * and SD spec.
232 * Return a fixed up register value
234 static u32 esdhc_writel_fixup(struct sdhci_host *host,
235 int spec_reg, u32 value, u32 old_value)
237 u32 ret;
240 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
241 * when SYSCTL[RSTD] is set for some special operations.
242 * No any impact on other operation.
244 if (spec_reg == SDHCI_INT_ENABLE)
245 ret = value | SDHCI_INT_BLK_GAP;
246 else
247 ret = value;
249 return ret;
252 static u32 esdhc_writew_fixup(struct sdhci_host *host,
253 int spec_reg, u16 value, u32 old_value)
255 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
256 int shift = (spec_reg & 0x2) * 8;
257 u32 ret;
259 switch (spec_reg) {
260 case SDHCI_TRANSFER_MODE:
262 * Postpone this write, we must do it together with a
263 * command write that is down below. Return old value.
265 pltfm_host->xfer_mode_shadow = value;
266 return old_value;
267 case SDHCI_COMMAND:
268 ret = (value << 16) | pltfm_host->xfer_mode_shadow;
269 return ret;
272 ret = old_value & (~(0xffff << shift));
273 ret |= (value << shift);
275 if (spec_reg == SDHCI_BLOCK_SIZE) {
277 * Two last DMA bits are reserved, and first one is used for
278 * non-standard blksz of 4096 bytes that we don't support
279 * yet. So clear the DMA boundary bits.
281 ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
283 return ret;
286 static u32 esdhc_writeb_fixup(struct sdhci_host *host,
287 int spec_reg, u8 value, u32 old_value)
289 u32 ret;
290 u32 dma_bits;
291 u8 tmp;
292 int shift = (spec_reg & 0x3) * 8;
295 * eSDHC doesn't have a standard power control register, so we do
296 * nothing here to avoid incorrect operation.
298 if (spec_reg == SDHCI_POWER_CONTROL)
299 return old_value;
301 * "DMA select" location is offset 0x28 in SD specification, but on
302 * P5020 or P3041, it's located at 0x29.
304 if (spec_reg == SDHCI_HOST_CONTROL) {
306 * If host control register is not standard, exit
307 * this function
309 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
310 return old_value;
312 /* DMA select is 22,23 bits in Protocol Control Register */
313 dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
314 ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
315 tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
316 (old_value & SDHCI_CTRL_DMA_MASK);
317 ret = (ret & (~0xff)) | tmp;
319 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
320 ret &= ~ESDHC_HOST_CONTROL_RES;
321 return ret;
324 ret = (old_value & (~(0xff << shift))) | (value << shift);
325 return ret;
328 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
330 u32 ret;
331 u32 value;
333 if (reg == SDHCI_CAPABILITIES_1)
334 value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
335 else
336 value = ioread32be(host->ioaddr + reg);
338 ret = esdhc_readl_fixup(host, reg, value);
340 return ret;
343 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
345 u32 ret;
346 u32 value;
348 if (reg == SDHCI_CAPABILITIES_1)
349 value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
350 else
351 value = ioread32(host->ioaddr + reg);
353 ret = esdhc_readl_fixup(host, reg, value);
355 return ret;
358 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
360 u16 ret;
361 u32 value;
362 int base = reg & ~0x3;
364 value = ioread32be(host->ioaddr + base);
365 ret = esdhc_readw_fixup(host, reg, value);
366 return ret;
369 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
371 u16 ret;
372 u32 value;
373 int base = reg & ~0x3;
375 value = ioread32(host->ioaddr + base);
376 ret = esdhc_readw_fixup(host, reg, value);
377 return ret;
380 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
382 u8 ret;
383 u32 value;
384 int base = reg & ~0x3;
386 value = ioread32be(host->ioaddr + base);
387 ret = esdhc_readb_fixup(host, reg, value);
388 return ret;
391 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
393 u8 ret;
394 u32 value;
395 int base = reg & ~0x3;
397 value = ioread32(host->ioaddr + base);
398 ret = esdhc_readb_fixup(host, reg, value);
399 return ret;
402 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
404 u32 value;
406 value = esdhc_writel_fixup(host, reg, val, 0);
407 iowrite32be(value, host->ioaddr + reg);
410 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
412 u32 value;
414 value = esdhc_writel_fixup(host, reg, val, 0);
415 iowrite32(value, host->ioaddr + reg);
418 static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
420 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
421 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
422 int base = reg & ~0x3;
423 u32 value;
424 u32 ret;
426 value = ioread32be(host->ioaddr + base);
427 ret = esdhc_writew_fixup(host, reg, val, value);
428 if (reg != SDHCI_TRANSFER_MODE)
429 iowrite32be(ret, host->ioaddr + base);
431 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
432 * 1us later after ESDHC_EXTN is set.
434 if (base == ESDHC_SYSTEM_CONTROL_2) {
435 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
436 esdhc->in_sw_tuning) {
437 udelay(1);
438 ret |= ESDHC_SMPCLKSEL;
439 iowrite32be(ret, host->ioaddr + base);
444 static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
446 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
447 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
448 int base = reg & ~0x3;
449 u32 value;
450 u32 ret;
452 value = ioread32(host->ioaddr + base);
453 ret = esdhc_writew_fixup(host, reg, val, value);
454 if (reg != SDHCI_TRANSFER_MODE)
455 iowrite32(ret, host->ioaddr + base);
457 /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
458 * 1us later after ESDHC_EXTN is set.
460 if (base == ESDHC_SYSTEM_CONTROL_2) {
461 if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
462 esdhc->in_sw_tuning) {
463 udelay(1);
464 ret |= ESDHC_SMPCLKSEL;
465 iowrite32(ret, host->ioaddr + base);
470 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
472 int base = reg & ~0x3;
473 u32 value;
474 u32 ret;
476 value = ioread32be(host->ioaddr + base);
477 ret = esdhc_writeb_fixup(host, reg, val, value);
478 iowrite32be(ret, host->ioaddr + base);
481 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
483 int base = reg & ~0x3;
484 u32 value;
485 u32 ret;
487 value = ioread32(host->ioaddr + base);
488 ret = esdhc_writeb_fixup(host, reg, val, value);
489 iowrite32(ret, host->ioaddr + base);
493 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
494 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
495 * and Block Gap Event(IRQSTAT[BGE]) are also set.
496 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
497 * and re-issue the entire read transaction from beginning.
499 static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
501 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
502 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
503 bool applicable;
504 dma_addr_t dmastart;
505 dma_addr_t dmanow;
507 applicable = (intmask & SDHCI_INT_DATA_END) &&
508 (intmask & SDHCI_INT_BLK_GAP) &&
509 (esdhc->vendor_ver == VENDOR_V_23);
510 if (!applicable)
511 return;
513 host->data->error = 0;
514 dmastart = sg_dma_address(host->data->sg);
515 dmanow = dmastart + host->data->bytes_xfered;
517 * Force update to the next DMA block boundary.
519 dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
520 SDHCI_DEFAULT_BOUNDARY_SIZE;
521 host->data->bytes_xfered = dmanow - dmastart;
522 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
525 static int esdhc_of_enable_dma(struct sdhci_host *host)
527 u32 value;
528 struct device *dev = mmc_dev(host->mmc);
530 if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
531 of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
532 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
534 value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
536 if (of_dma_is_coherent(dev->of_node))
537 value |= ESDHC_DMA_SNOOP;
538 else
539 value &= ~ESDHC_DMA_SNOOP;
541 sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
542 return 0;
545 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
547 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
548 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
550 if (esdhc->peripheral_clock)
551 return esdhc->peripheral_clock;
552 else
553 return pltfm_host->clock;
556 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
558 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
559 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
560 unsigned int clock;
562 if (esdhc->peripheral_clock)
563 clock = esdhc->peripheral_clock;
564 else
565 clock = pltfm_host->clock;
566 return clock / 256 / 16;
569 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
571 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
572 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
573 ktime_t timeout;
574 u32 val, clk_en;
576 clk_en = ESDHC_CLOCK_SDCLKEN;
579 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
580 * is 2.2 or lower.
582 if (esdhc->vendor_ver <= VENDOR_V_22)
583 clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
584 ESDHC_CLOCK_PEREN);
586 val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
588 if (enable)
589 val |= clk_en;
590 else
591 val &= ~clk_en;
593 sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
596 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
597 * wait clock stable bit which does not exist.
599 timeout = ktime_add_ms(ktime_get(), 20);
600 while (esdhc->vendor_ver > VENDOR_V_22) {
601 bool timedout = ktime_after(ktime_get(), timeout);
603 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
604 break;
605 if (timedout) {
606 pr_err("%s: Internal clock never stabilised.\n",
607 mmc_hostname(host->mmc));
608 break;
610 usleep_range(10, 20);
614 static void esdhc_flush_async_fifo(struct sdhci_host *host)
616 ktime_t timeout;
617 u32 val;
619 val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
620 val |= ESDHC_FLUSH_ASYNC_FIFO;
621 sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
623 /* Wait max 20 ms */
624 timeout = ktime_add_ms(ktime_get(), 20);
625 while (1) {
626 bool timedout = ktime_after(ktime_get(), timeout);
628 if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
629 ESDHC_FLUSH_ASYNC_FIFO))
630 break;
631 if (timedout) {
632 pr_err("%s: flushing asynchronous FIFO timeout.\n",
633 mmc_hostname(host->mmc));
634 break;
636 usleep_range(10, 20);
640 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
642 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
643 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
644 unsigned int pre_div = 1, div = 1;
645 unsigned int clock_fixup = 0;
646 ktime_t timeout;
647 u32 temp;
649 if (clock == 0) {
650 host->mmc->actual_clock = 0;
651 esdhc_clock_enable(host, false);
652 return;
655 /* Start pre_div at 2 for vendor version < 2.3. */
656 if (esdhc->vendor_ver < VENDOR_V_23)
657 pre_div = 2;
659 /* Fix clock value. */
660 if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
661 esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
662 clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
663 else if (esdhc->clk_fixup)
664 clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
666 if (clock_fixup == 0 || clock < clock_fixup)
667 clock_fixup = clock;
669 /* Calculate pre_div and div. */
670 while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
671 pre_div *= 2;
673 while (host->max_clk / pre_div / div > clock_fixup && div < 16)
674 div++;
676 esdhc->div_ratio = pre_div * div;
678 /* Limit clock division for HS400 200MHz clock for quirk. */
679 if (esdhc->quirk_limited_clk_division &&
680 clock == MMC_HS200_MAX_DTR &&
681 (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
682 host->flags & SDHCI_HS400_TUNING)) {
683 if (esdhc->div_ratio <= 4) {
684 pre_div = 4;
685 div = 1;
686 } else if (esdhc->div_ratio <= 8) {
687 pre_div = 4;
688 div = 2;
689 } else if (esdhc->div_ratio <= 12) {
690 pre_div = 4;
691 div = 3;
692 } else {
693 pr_warn("%s: using unsupported clock division.\n",
694 mmc_hostname(host->mmc));
696 esdhc->div_ratio = pre_div * div;
699 host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
701 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
702 clock, host->mmc->actual_clock);
704 /* Set clock division into register. */
705 pre_div >>= 1;
706 div--;
708 esdhc_clock_enable(host, false);
710 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
711 temp &= ~ESDHC_CLOCK_MASK;
712 temp |= ((div << ESDHC_DIVIDER_SHIFT) |
713 (pre_div << ESDHC_PREDIV_SHIFT));
714 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
717 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
718 * wait clock stable bit which does not exist.
720 timeout = ktime_add_ms(ktime_get(), 20);
721 while (esdhc->vendor_ver > VENDOR_V_22) {
722 bool timedout = ktime_after(ktime_get(), timeout);
724 if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
725 break;
726 if (timedout) {
727 pr_err("%s: Internal clock never stabilised.\n",
728 mmc_hostname(host->mmc));
729 break;
731 usleep_range(10, 20);
734 /* Additional setting for HS400. */
735 if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
736 clock == MMC_HS200_MAX_DTR) {
737 temp = sdhci_readl(host, ESDHC_TBCTL);
738 sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
739 temp = sdhci_readl(host, ESDHC_SDCLKCTL);
740 sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
741 esdhc_clock_enable(host, true);
743 temp = sdhci_readl(host, ESDHC_DLLCFG0);
744 temp |= ESDHC_DLL_ENABLE;
745 if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
746 temp |= ESDHC_DLL_FREQ_SEL;
747 sdhci_writel(host, temp, ESDHC_DLLCFG0);
749 temp |= ESDHC_DLL_RESET;
750 sdhci_writel(host, temp, ESDHC_DLLCFG0);
751 udelay(1);
752 temp &= ~ESDHC_DLL_RESET;
753 sdhci_writel(host, temp, ESDHC_DLLCFG0);
755 /* Wait max 20 ms */
756 if (read_poll_timeout(sdhci_readl, temp,
757 temp & ESDHC_DLL_STS_SLV_LOCK,
758 10, 20000, false,
759 host, ESDHC_DLLSTAT0))
760 pr_err("%s: timeout for delay chain lock.\n",
761 mmc_hostname(host->mmc));
763 temp = sdhci_readl(host, ESDHC_TBCTL);
764 sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
766 esdhc_clock_enable(host, false);
767 esdhc_flush_async_fifo(host);
769 esdhc_clock_enable(host, true);
772 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
774 u32 ctrl;
776 ctrl = sdhci_readl(host, ESDHC_PROCTL);
777 ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
778 switch (width) {
779 case MMC_BUS_WIDTH_8:
780 ctrl |= ESDHC_CTRL_8BITBUS;
781 break;
783 case MMC_BUS_WIDTH_4:
784 ctrl |= ESDHC_CTRL_4BITBUS;
785 break;
787 default:
788 break;
791 sdhci_writel(host, ctrl, ESDHC_PROCTL);
794 static void esdhc_reset(struct sdhci_host *host, u8 mask)
796 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
797 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
798 u32 val, bus_width = 0;
801 * Add delay to make sure all the DMA transfers are finished
802 * for quirk.
804 if (esdhc->quirk_delay_before_data_reset &&
805 (mask & SDHCI_RESET_DATA) &&
806 (host->flags & SDHCI_REQ_USE_DMA))
807 mdelay(5);
810 * Save bus-width for eSDHC whose vendor version is 2.2
811 * or lower for data reset.
813 if ((mask & SDHCI_RESET_DATA) &&
814 (esdhc->vendor_ver <= VENDOR_V_22)) {
815 val = sdhci_readl(host, ESDHC_PROCTL);
816 bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
819 sdhci_reset(host, mask);
822 * Restore bus-width setting and interrupt registers for eSDHC
823 * whose vendor version is 2.2 or lower for data reset.
825 if ((mask & SDHCI_RESET_DATA) &&
826 (esdhc->vendor_ver <= VENDOR_V_22)) {
827 val = sdhci_readl(host, ESDHC_PROCTL);
828 val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
829 val |= bus_width;
830 sdhci_writel(host, val, ESDHC_PROCTL);
832 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
833 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
837 * Some bits have to be cleaned manually for eSDHC whose spec
838 * version is higher than 3.0 for all reset.
840 if ((mask & SDHCI_RESET_ALL) &&
841 (esdhc->spec_ver >= SDHCI_SPEC_300)) {
842 val = sdhci_readl(host, ESDHC_TBCTL);
843 val &= ~ESDHC_TB_EN;
844 sdhci_writel(host, val, ESDHC_TBCTL);
847 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
848 * 0 for quirk.
850 if (esdhc->quirk_unreliable_pulse_detection) {
851 val = sdhci_readl(host, ESDHC_DLLCFG1);
852 val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
853 sdhci_writel(host, val, ESDHC_DLLCFG1);
858 /* The SCFG, Supplemental Configuration Unit, provides SoC specific
859 * configuration and status registers for the device. There is a
860 * SDHC IO VSEL control register on SCFG for some platforms. It's
861 * used to support SDHC IO voltage switching.
863 static const struct of_device_id scfg_device_ids[] = {
864 { .compatible = "fsl,t1040-scfg", },
865 { .compatible = "fsl,ls1012a-scfg", },
866 { .compatible = "fsl,ls1046a-scfg", },
870 /* SDHC IO VSEL control register definition */
871 #define SCFG_SDHCIOVSELCR 0x408
872 #define SDHCIOVSELCR_TGLEN 0x80000000
873 #define SDHCIOVSELCR_VSELVAL 0x60000000
874 #define SDHCIOVSELCR_SDHC_VS 0x00000001
876 static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
877 struct mmc_ios *ios)
879 struct sdhci_host *host = mmc_priv(mmc);
880 struct device_node *scfg_node;
881 void __iomem *scfg_base = NULL;
882 u32 sdhciovselcr;
883 u32 val;
886 * Signal Voltage Switching is only applicable for Host Controllers
887 * v3.00 and above.
889 if (host->version < SDHCI_SPEC_300)
890 return 0;
892 val = sdhci_readl(host, ESDHC_PROCTL);
894 switch (ios->signal_voltage) {
895 case MMC_SIGNAL_VOLTAGE_330:
896 val &= ~ESDHC_VOLT_SEL;
897 sdhci_writel(host, val, ESDHC_PROCTL);
898 return 0;
899 case MMC_SIGNAL_VOLTAGE_180:
900 scfg_node = of_find_matching_node(NULL, scfg_device_ids);
901 if (scfg_node)
902 scfg_base = of_iomap(scfg_node, 0);
903 if (scfg_base) {
904 sdhciovselcr = SDHCIOVSELCR_TGLEN |
905 SDHCIOVSELCR_VSELVAL;
906 iowrite32be(sdhciovselcr,
907 scfg_base + SCFG_SDHCIOVSELCR);
909 val |= ESDHC_VOLT_SEL;
910 sdhci_writel(host, val, ESDHC_PROCTL);
911 mdelay(5);
913 sdhciovselcr = SDHCIOVSELCR_TGLEN |
914 SDHCIOVSELCR_SDHC_VS;
915 iowrite32be(sdhciovselcr,
916 scfg_base + SCFG_SDHCIOVSELCR);
917 iounmap(scfg_base);
918 } else {
919 val |= ESDHC_VOLT_SEL;
920 sdhci_writel(host, val, ESDHC_PROCTL);
922 return 0;
923 default:
924 return 0;
928 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
929 { .family = "QorIQ T1023", },
930 { .family = "QorIQ T1040", },
931 { .family = "QorIQ T2080", },
932 { .family = "QorIQ LS1021A", },
933 { },
936 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
937 { .family = "QorIQ LS1012A", },
938 { .family = "QorIQ LS1043A", },
939 { .family = "QorIQ LS1046A", },
940 { .family = "QorIQ LS1080A", },
941 { .family = "QorIQ LS2080A", },
942 { .family = "QorIQ LA1575A", },
943 { },
946 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
948 u32 val;
950 esdhc_clock_enable(host, false);
951 esdhc_flush_async_fifo(host);
953 val = sdhci_readl(host, ESDHC_TBCTL);
954 if (enable)
955 val |= ESDHC_TB_EN;
956 else
957 val &= ~ESDHC_TB_EN;
958 sdhci_writel(host, val, ESDHC_TBCTL);
960 esdhc_clock_enable(host, true);
963 static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
964 u8 *window_end)
966 u32 val;
968 /* Write TBCTL[11:8]=4'h8 */
969 val = sdhci_readl(host, ESDHC_TBCTL);
970 val &= ~(0xf << 8);
971 val |= 8 << 8;
972 sdhci_writel(host, val, ESDHC_TBCTL);
974 mdelay(1);
976 /* Read TBCTL[31:0] register and rewrite again */
977 val = sdhci_readl(host, ESDHC_TBCTL);
978 sdhci_writel(host, val, ESDHC_TBCTL);
980 mdelay(1);
982 /* Read the TBSTAT[31:0] register twice */
983 val = sdhci_readl(host, ESDHC_TBSTAT);
984 val = sdhci_readl(host, ESDHC_TBSTAT);
986 *window_end = val & 0xff;
987 *window_start = (val >> 8) & 0xff;
990 static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
991 u8 *window_end)
993 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
994 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
995 u8 start_ptr, end_ptr;
997 if (esdhc->quirk_tuning_erratum_type1) {
998 *window_start = 5 * esdhc->div_ratio;
999 *window_end = 3 * esdhc->div_ratio;
1000 return;
1003 esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
1005 /* Reset data lines by setting ESDHCCTL[RSTD] */
1006 sdhci_reset(host, SDHCI_RESET_DATA);
1007 /* Write 32'hFFFF_FFFF to IRQSTAT register */
1008 sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
1010 /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
1011 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
1012 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
1013 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
1016 if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
1017 *window_start = 8 * esdhc->div_ratio;
1018 *window_end = 4 * esdhc->div_ratio;
1019 } else {
1020 *window_start = 5 * esdhc->div_ratio;
1021 *window_end = 3 * esdhc->div_ratio;
1025 static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
1026 u8 window_start, u8 window_end)
1028 struct sdhci_host *host = mmc_priv(mmc);
1029 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1030 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1031 u32 val;
1032 int ret;
1034 /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1035 val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1036 ESDHC_WNDW_STRT_PTR_MASK;
1037 val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1038 sdhci_writel(host, val, ESDHC_TBPTR);
1040 /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1041 val = sdhci_readl(host, ESDHC_TBCTL);
1042 val &= ~ESDHC_TB_MODE_MASK;
1043 val |= ESDHC_TB_MODE_SW;
1044 sdhci_writel(host, val, ESDHC_TBCTL);
1046 esdhc->in_sw_tuning = true;
1047 ret = sdhci_execute_tuning(mmc, opcode);
1048 esdhc->in_sw_tuning = false;
1049 return ret;
1052 static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1054 struct sdhci_host *host = mmc_priv(mmc);
1055 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1056 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1057 u8 window_start, window_end;
1058 int ret, retries = 1;
1059 bool hs400_tuning;
1060 unsigned int clk;
1061 u32 val;
1063 /* For tuning mode, the sd clock divisor value
1064 * must be larger than 3 according to reference manual.
1066 clk = esdhc->peripheral_clock / 3;
1067 if (host->clock > clk)
1068 esdhc_of_set_clock(host, clk);
1070 esdhc_tuning_block_enable(host, true);
1073 * The eSDHC controller takes the data timeout value into account
1074 * during tuning. If the SD card is too slow sending the response, the
1075 * timer will expire and a "Buffer Read Ready" interrupt without data
1076 * is triggered. This leads to tuning errors.
1078 * Just set the timeout to the maximum value because the core will
1079 * already take care of it in sdhci_send_tuning().
1081 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
1083 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1085 do {
1086 if (esdhc->quirk_limited_clk_division &&
1087 hs400_tuning)
1088 esdhc_of_set_clock(host, host->clock);
1090 /* Do HW tuning */
1091 val = sdhci_readl(host, ESDHC_TBCTL);
1092 val &= ~ESDHC_TB_MODE_MASK;
1093 val |= ESDHC_TB_MODE_3;
1094 sdhci_writel(host, val, ESDHC_TBCTL);
1096 ret = sdhci_execute_tuning(mmc, opcode);
1097 if (ret)
1098 break;
1100 /* For type2 affected platforms of the tuning erratum,
1101 * tuning may succeed although eSDHC might not have
1102 * tuned properly. Need to check tuning window.
1104 if (esdhc->quirk_tuning_erratum_type2 &&
1105 !host->tuning_err) {
1106 esdhc_tuning_window_ptr(host, &window_start,
1107 &window_end);
1108 if (abs(window_start - window_end) >
1109 (4 * esdhc->div_ratio + 2))
1110 host->tuning_err = -EAGAIN;
1113 /* If HW tuning fails and triggers erratum,
1114 * try workaround.
1116 ret = host->tuning_err;
1117 if (ret == -EAGAIN &&
1118 (esdhc->quirk_tuning_erratum_type1 ||
1119 esdhc->quirk_tuning_erratum_type2)) {
1120 /* Recover HS400 tuning flag */
1121 if (hs400_tuning)
1122 host->flags |= SDHCI_HS400_TUNING;
1123 pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
1124 mmc_hostname(mmc));
1125 /* Do SW tuning */
1126 esdhc_prepare_sw_tuning(host, &window_start,
1127 &window_end);
1128 ret = esdhc_execute_sw_tuning(mmc, opcode,
1129 window_start,
1130 window_end);
1131 if (ret)
1132 break;
1134 /* Retry both HW/SW tuning with reduced clock. */
1135 ret = host->tuning_err;
1136 if (ret == -EAGAIN && retries) {
1137 /* Recover HS400 tuning flag */
1138 if (hs400_tuning)
1139 host->flags |= SDHCI_HS400_TUNING;
1141 clk = host->max_clk / (esdhc->div_ratio + 1);
1142 esdhc_of_set_clock(host, clk);
1143 pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
1144 mmc_hostname(mmc));
1145 } else {
1146 break;
1148 } else {
1149 break;
1151 } while (retries--);
1153 if (ret) {
1154 esdhc_tuning_block_enable(host, false);
1155 } else if (hs400_tuning) {
1156 val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1157 val |= ESDHC_FLW_CTL_BG;
1158 sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1161 return ret;
/*
 * Switch bus timing/signaling. The eSDHC needs vendor-specific handling
 * for HS400: before programming any new speed mode, all HS400-specific
 * register settings must be cleared, otherwise the controller remains in
 * HS400 mode. The register write order below is significant.
 */
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
{
	u32 val;

	/*
	 * There are specific registers setting for HS400 mode.
	 * Clean all of them if controller is in HS400 mode to
	 * exit HS400 mode before re-setting any speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		/* Clear the HS400 flow-control/background gating bit. */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		/* Clear the HS400 command-clock control bit. */
		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		/* The HS400 mode bit is changed with the SD clock gated. */
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		/* Disable the DLL used by HS400 sampling. */
		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		/* Clear the HS400 tuning-window adjustment. */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	/*
	 * For HS400 only the tuning block is enabled here; the remaining
	 * HS400 setup happens elsewhere (after tuning). All other timings
	 * take the generic SDHCI path.
	 */
	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}
1207 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1209 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1210 struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1211 u32 command;
1213 if (esdhc->quirk_trans_complete_erratum) {
1214 command = SDHCI_GET_CMD(sdhci_readw(host,
1215 SDHCI_COMMAND));
1216 if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1217 sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1218 intmask & SDHCI_INT_DATA_END) {
1219 intmask &= ~SDHCI_INT_DATA_END;
1220 sdhci_writel(host, SDHCI_INT_DATA_END,
1221 SDHCI_INT_STATUS);
1224 return intmask;
#ifdef CONFIG_PM_SLEEP
/*
 * Saved copy of the host-control (PROCTL) register, restored on resume.
 * NOTE(review): this is a single file-scope static shared by every eSDHC
 * instance — on a system with multiple controllers a later suspend
 * overwrites an earlier one's saved value. Presumably all instances use
 * the same setting in practice; verify, or move this into sdhci_esdhc.
 */
static u32 esdhc_proctl;

/* System-sleep suspend: save PROCTL, flag retuning, suspend the host. */
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	/* Unless HW retuning (mode 3) is in use, retune after resume. */
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}
1241 static int esdhc_of_resume(struct device *dev)
1243 struct sdhci_host *host = dev_get_drvdata(dev);
1244 int ret = sdhci_resume_host(host);
1246 if (ret == 0) {
1247 /* Isn't this already done by sdhci_resume_host() ? --rmk */
1248 esdhc_of_enable_dma(host);
1249 sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1251 return ret;
#endif

/*
 * PM callbacks; the suspend/resume entries compile away when
 * CONFIG_PM_SLEEP is not set.
 */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);
/* SDHCI ops for big-endian register layouts (accessors do the swapping). */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
/* SDHCI ops for little-endian register layouts; mirrors the BE table. */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
/*
 * Platform data selected at probe time from the "little-endian" DT
 * property; BROKEN_CARD_DETECTION is added only on PPC builds here
 * (and selectively cleared again in probe for known-good boards).
 */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_pdata ? &sdhci_esdhc_le_ops : &sdhci_esdhc_le_ops,
};
/* SoC revisions that report a wrong host version register. */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

/* SoC revisions limited to even clock divisors (see quirk in esdhc_init). */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

/* SoC revisions with unreliable pulse (card-interrupt) detection. */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};
/*
 * One-time controller setup called from probe: read the IP version,
 * latch SoC-specific quirks, pick up per-board clock fixups from the
 * OF match table, determine the base clock source, and program the
 * peripheral-clock select bit (which SDHCI_RESET_ALL cannot clear).
 */
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	/* Cache vendor/spec version fields for quirk decisions in probe. */
	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	/* Per-board max-clock fixups come from the OF match data. */
	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		esdhc->quirk_delay_before_data_reset = true;
		esdhc->quirk_trans_complete_erratum = true;
	}

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock would be assigned with a value
		 * which is eSDHC base clock when using peripheral clock.
		 * For some platforms, the clock value got by common clk
		 * API is peripheral clock while the eSDHC base clock is
		 * 1/2 peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	/* Clock must be gated while changing the clock-select bit. */
	esdhc_clock_enable(host, false);
	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit is not able to be reset by SDHCI_RESET_ALL. Need to
	 * initialize it as 1 or 0 once, to override the different value
	 * which may be configured in bootloader.
	 */
	if (esdhc->peripheral_clock)
		val |= ESDHC_PERIPHERAL_CLK_SEL;
	else
		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
	esdhc_clock_enable(host, true);
}
1408 static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1410 esdhc_tuning_block_enable(mmc_priv(mmc), false);
1411 return 0;
/*
 * Probe: register an SDHCI platform host, wiring in eSDHC-specific
 * mmc_host_ops and per-SoC/per-compatible quirks. Quirk setup must be
 * complete before sdhci_add_host() publishes the host.
 */
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	/* DT "little-endian" selects the register-access ops table. */
	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	/* Override generic mmc ops with eSDHC-specific implementations. */
	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	/* Reads versions, latches SoC quirks, selects the base clock. */
	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	/* Vendor version 2.2 cannot do CMD23; newer IP handles busy IRQ. */
	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	/* These boards have working card detection; drop the PPC quirk. */
	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(np, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
 err:
	sdhci_pltfm_free(pdev);
	return ret;
}
/* Platform driver glue: matched via the OF table at the top of the file. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");