// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *          Anton Vorontsov <avorontsov@ru.mvista.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

#define VENDOR_V_22     0x12
#define VENDOR_V_23     0x13

#define MMC_TIMING_NUM  (MMC_TIMING_MMC_HS400 + 1)

struct esdhc_clk_fixup {
        const unsigned int sd_dflt_max_clk;
        const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_MMC_HS] = 46500000,
        .max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
        .max_clk[MMC_TIMING_MMC_HS200] = 116700000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
        .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
        .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
        .sd_dflt_max_clk = 20000000,
        .max_clk[MMC_TIMING_LEGACY] = 20000000,
        .max_clk[MMC_TIMING_MMC_HS] = 42000000,
        .max_clk[MMC_TIMING_SD_HS] = 40000000,
};

static const struct of_device_id sdhci_esdhc_of_match[] = {
        { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk },
        { .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk },
        { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk },
        { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk },
        { .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk },
        { .compatible = "fsl,mpc8379-esdhc" },
        { .compatible = "fsl,mpc8536-esdhc" },
        { .compatible = "fsl,esdhc" },
        { }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);

struct sdhci_esdhc {
        u8 vendor_ver;
        u8 spec_ver;
        bool quirk_incorrect_hostver;
        bool quirk_limited_clk_division;
        bool quirk_unreliable_pulse_detection;
        bool quirk_tuning_erratum_type1;
        bool quirk_tuning_erratum_type2;
        bool quirk_ignore_data_inhibit;
        bool quirk_delay_before_data_reset;
        bool quirk_trans_complete_erratum;
        bool in_sw_tuning;
        unsigned int peripheral_clock;
        const struct esdhc_clk_fixup *clk_fixup;
        u32 div_ratio;
};
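
/*
 * The quirk_* flags above are not read directly from device-tree properties;
 * esdhc_init() and sdhci_esdhc_probe() further down fill them in from
 * soc_device_match() tables and compatible-string checks, so each flag
 * corresponds to a specific SoC/revision erratum.
 */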

/**
 * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
 *                     to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC spec and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
                             int spec_reg, u32 value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 ret;

        /*
         * The ADMA flag bit in eSDHC is not compatible with the standard
         * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
         * is supported by eSDHC.
         * For many FSL eSDHC controllers the reset value of the
         * SDHCI_CAN_DO_ADMA1 field is 1, but some of them cannot do ADMA;
         * only controllers whose vendor version is greater than 2.2/0x12
         * support ADMA.
         */
        if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
                if (esdhc->vendor_ver > VENDOR_V_22) {
                        ret = value | SDHCI_CAN_DO_ADMA2;
                        return ret;
                }
        }

        /*
         * The DAT[3:0] line signal levels and the CMD line signal level are
         * not compatible with the standard SDHC register. The line signal
         * levels DAT[7:0] are at bits 31:24 and the command line signal
         * level is at bit 23. All other bits are the same as in the
         * standard SDHC register.
         */
        if (spec_reg == SDHCI_PRESENT_STATE) {
                ret = value & 0x000fffff;
                ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
                ret |= (value << 1) & SDHCI_CMD_LVL;

                /*
                 * Some controllers have an unreliable Data Line Active
                 * bit for commands with busy signal. This affects the
                 * Command Inhibit (data) bit. Just ignore it since the
                 * MMC core driver has already polled card status
                 * with CMD13 after any command with busy signal.
                 */
                if (esdhc->quirk_ignore_data_inhibit)
                        ret &= ~SDHCI_DATA_INHIBIT;
                return ret;
        }

        /*
         * DTS properties of the mmc host are used to enable each speed mode
         * according to SoC and board capability. So clean up the
         * SDR50/SDR104/DDR50 support bits here.
         */
        if (spec_reg == SDHCI_CAPABILITIES_1) {
                ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
                                SDHCI_SUPPORT_DDR50);
                return ret;
        }

        ret = value;
        return ret;
}

static u16 esdhc_readw_fixup(struct sdhci_host *host,
                             int spec_reg, u32 value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u16 ret;
        int shift = (spec_reg & 0x2) * 8;

        if (spec_reg == SDHCI_TRANSFER_MODE)
                return pltfm_host->xfer_mode_shadow;

        if (spec_reg == SDHCI_HOST_VERSION)
                ret = value & 0xffff;
        else
                ret = (value >> shift) & 0xffff;
        /*
         * Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
         * vendor version and spec version information.
         */
        if ((spec_reg == SDHCI_HOST_VERSION) &&
            (esdhc->quirk_incorrect_hostver))
                ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
        return ret;
}

static u8 esdhc_readb_fixup(struct sdhci_host *host,
                            int spec_reg, u32 value)
{
        u8 ret;
        u8 dma_bits;
        int shift = (spec_reg & 0x3) * 8;

        ret = (value >> shift) & 0xff;

        /*
         * "DMA select" is located at offset 0x28 in the SD specification,
         * but on P5020 or P3041 it is located at 0x29.
         */
        if (spec_reg == SDHCI_HOST_CONTROL) {
                /* DMA select is bits 22,23 in the Protocol Control Register */
                dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
                /* fixup the result */
                ret &= ~SDHCI_CTRL_DMA_MASK;
                ret |= dma_bits;
        }
        return ret;
}

/**
 * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
 *                      written into the eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value on spec_reg address
 *
 * In the SD spec there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC spec and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
                              int spec_reg, u32 value, u32 old_value)
{
        u32 ret;

        /*
         * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
         * when SYSCTL[RSTD] is set for some special operations.
         * It has no impact on other operations.
         */
        if (spec_reg == SDHCI_INT_ENABLE)
                ret = value | SDHCI_INT_BLK_GAP;
        else
                ret = value;

        return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
                              int spec_reg, u16 value, u32 old_value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        int shift = (spec_reg & 0x2) * 8;
        u32 ret;

        switch (spec_reg) {
        case SDHCI_TRANSFER_MODE:
                /*
                 * Postpone this write, we must do it together with a
                 * command write that is down below. Return the old value.
                 */
                pltfm_host->xfer_mode_shadow = value;
                return old_value;
        case SDHCI_COMMAND:
                ret = (value << 16) | pltfm_host->xfer_mode_shadow;
                return ret;
        }

        ret = old_value & (~(0xffff << shift));
        ret |= (value << shift);

        if (spec_reg == SDHCI_BLOCK_SIZE) {
                /*
                 * The two last DMA bits are reserved, and the first one is
                 * used for a non-standard blksz of 4096 bytes that we don't
                 * support yet. So clear the DMA boundary bits.
                 */
                ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
        }
        return ret;
}
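
/*
 * The SDHCI core performs a 16-bit write to SDHCI_TRANSFER_MODE followed by
 * a 16-bit write to SDHCI_COMMAND, but eSDHC combines both halves in a
 * single 32-bit register. The xfer_mode_shadow latch above postpones the
 * transfer-mode half and merges it into the command write, so the hardware
 * sees both fields in one 32-bit store.
 */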

static u32 esdhc_writeb_fixup(struct sdhci_host *host,
                              int spec_reg, u8 value, u32 old_value)
{
        u32 ret;
        u32 dma_bits;
        u8 tmp;
        int shift = (spec_reg & 0x3) * 8;

        /*
         * eSDHC doesn't have a standard power control register, so we do
         * nothing here to avoid incorrect operation.
         */
        if (spec_reg == SDHCI_POWER_CONTROL)
                return old_value;
        /*
         * "DMA select" is located at offset 0x28 in the SD specification,
         * but on P5020 or P3041 it is located at 0x29.
         */
        if (spec_reg == SDHCI_HOST_CONTROL) {
                /*
                 * If the host control register is not standard, exit
                 * this function.
                 */
                if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
                        return old_value;

                /* DMA select is bits 22,23 in the Protocol Control Register */
                dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
                ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
                tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
                      (old_value & SDHCI_CTRL_DMA_MASK);
                ret = (ret & (~0xff)) | tmp;

                /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
                ret &= ~ESDHC_HOST_CONTROL_RES;
                return ret;
        }
        ret = (old_value & (~(0xff << shift))) | (value << shift);
        return ret;
}

static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
        u32 ret;
        u32 value;

        if (reg == SDHCI_CAPABILITIES_1)
                value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
        else
                value = ioread32be(host->ioaddr + reg);

        ret = esdhc_readl_fixup(host, reg, value);

        return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
        u32 ret;
        u32 value;

        if (reg == SDHCI_CAPABILITIES_1)
                value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
        else
                value = ioread32(host->ioaddr + reg);

        ret = esdhc_readl_fixup(host, reg, value);

        return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
        u16 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_readw_fixup(host, reg, value);
        return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
        u16 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_readw_fixup(host, reg, value);
        return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
        u8 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_readb_fixup(host, reg, value);
        return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
        u8 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_readb_fixup(host, reg, value);
        return ret;
}

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
        u32 value;

        value = esdhc_writel_fixup(host, reg, val, 0);
        iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
        u32 value;

        value = esdhc_writel_fixup(host, reg, val, 0);
        iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_writew_fixup(host, reg, val, value);
        if (reg != SDHCI_TRANSFER_MODE)
                iowrite32be(ret, host->ioaddr + base);

        /*
         * Starting SW tuning requires ESDHC_SMPCLKSEL to be set
         * 1 us after ESDHC_EXTN is set.
         */
        if (base == ESDHC_SYSTEM_CONTROL_2) {
                if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
                    esdhc->in_sw_tuning) {
                        udelay(1);
                        ret |= ESDHC_SMPCLKSEL;
                        iowrite32be(ret, host->ioaddr + base);
                }
        }
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_writew_fixup(host, reg, val, value);
        if (reg != SDHCI_TRANSFER_MODE)
                iowrite32(ret, host->ioaddr + base);

        /*
         * Starting SW tuning requires ESDHC_SMPCLKSEL to be set
         * 1 us after ESDHC_EXTN is set.
         */
        if (base == ESDHC_SYSTEM_CONTROL_2) {
                if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
                    esdhc->in_sw_tuning) {
                        udelay(1);
                        ret |= ESDHC_SMPCLKSEL;
                        iowrite32(ret, host->ioaddr + base);
                }
        }
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_writeb_fixup(host, reg, val, value);
        iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_writeb_fixup(host, reg, val, value);
        iowrite32(ret, host->ioaddr + base);
}
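
/*
 * All of the 8-bit and 16-bit accessors above round the offset down to a
 * 32-bit aligned address (reg & ~0x3), read or read-modify-write the full
 * word, and let the *_fixup() helpers shift the interesting byte/halfword
 * into place, since eSDHC registers are only accessible as 32-bit words.
 */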

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        bool applicable;
        dma_addr_t dmastart;
        dma_addr_t dmanow;

        applicable = (intmask & SDHCI_INT_DATA_END) &&
                     (intmask & SDHCI_INT_BLK_GAP) &&
                     (esdhc->vendor_ver == VENDOR_V_23);
        if (!applicable)
                return;

        host->data->error = 0;
        dmastart = sg_dma_address(host->data->sg);
        dmanow = dmastart + host->data->bytes_xfered;
        /*
         * Force an update to the next DMA block boundary.
         */
        dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
                 SDHCI_DEFAULT_BOUNDARY_SIZE;
        host->data->bytes_xfered = dmanow - dmastart;
        sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
        int ret;
        u32 value;
        struct device *dev = mmc_dev(host->mmc);

        if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
            of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                if (ret)
                        return ret;
        }

        value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

        if (of_dma_is_coherent(dev->of_node))
                value |= ESDHC_DMA_SNOOP;
        else
                value &= ~ESDHC_DMA_SNOOP;

        sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
        return 0;
}

static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

        if (esdhc->peripheral_clock)
                return esdhc->peripheral_clock;

        return pltfm_host->clock;
}

static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        unsigned int clock;

        if (esdhc->peripheral_clock)
                clock = esdhc->peripheral_clock;
        else
                clock = pltfm_host->clock;
        return clock / 256 / 16;
}
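
/*
 * The minimum clock follows from the largest divider the hardware offers:
 * esdhc_of_set_clock() below lets the prescaler grow up to 256 and the
 * divisor up to 16, so base_clock / (256 * 16) is the slowest SD clock
 * this controller can produce.
 */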

static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        ktime_t timeout;
        u32 val, clk_en;

        clk_en = ESDHC_CLOCK_SDCLKEN;

        /*
         * The IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
         * is 2.2 or lower.
         */
        if (esdhc->vendor_ver <= VENDOR_V_22)
                clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
                           ESDHC_CLOCK_PEREN);

        val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

        if (enable)
                val |= clk_en;
        else
                val &= ~clk_en;

        sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

        /*
         * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
         * wait for the clock stable bit, which does not exist.
         */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }
}

static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
        ktime_t timeout;
        u32 val;

        val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
        val |= ESDHC_FLUSH_ASYNC_FIFO;
        sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (1) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
                      ESDHC_FLUSH_ASYNC_FIFO))
                        break;
                if (timedout) {
                        pr_err("%s: flushing asynchronous FIFO timeout.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }
}

static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        unsigned int pre_div = 1, div = 1;
        unsigned int clock_fixup = 0;
        ktime_t timeout;
        u32 temp;

        if (clock == 0) {
                host->mmc->actual_clock = 0;
                esdhc_clock_enable(host, false);
                return;
        }

        /* Start pre_div at 2 for vendor version < 2.3. */
        if (esdhc->vendor_ver < VENDOR_V_23)
                pre_div = 2;

        /* Fix up the clock value. */
        if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
            esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
                clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
        else if (esdhc->clk_fixup)
                clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

        if (clock_fixup == 0 || clock < clock_fixup)
                clock_fixup = clock;

        /* Calculate pre_div and div. */
        while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
                pre_div *= 2;

        while (host->max_clk / pre_div / div > clock_fixup && div < 16)
                div++;

        esdhc->div_ratio = pre_div * div;
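
        /*
         * Worked example with hypothetical numbers: for a 400 MHz max_clk
         * and a requested 50 MHz clock on a controller with vendor version
         * >= 2.3, pre_div stays at 1 (400 MHz / 1 / 16 = 25 MHz is already
         * below the target) and the second loop raises div to 8
         * (400 MHz / 1 / 8 = 50 MHz), so div_ratio becomes 8 and the actual
         * clock is exactly 50 MHz.
         */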

        /* Limit clock division for the HS400 200 MHz clock for the quirk. */
        if (esdhc->quirk_limited_clk_division &&
            clock == MMC_HS200_MAX_DTR &&
            (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
             host->flags & SDHCI_HS400_TUNING)) {
                if (esdhc->div_ratio <= 4) {
                        pre_div = 4;
                        div = 1;
                } else if (esdhc->div_ratio <= 8) {
                        pre_div = 4;
                        div = 2;
                } else if (esdhc->div_ratio <= 12) {
                        pre_div = 4;
                        div = 3;
                } else {
                        pr_warn("%s: using unsupported clock division.\n",
                                mmc_hostname(host->mmc));
                }
                esdhc->div_ratio = pre_div * div;
        }

        host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

        dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
                clock, host->mmc->actual_clock);

        /* Set the clock division into the register. */
        pre_div >>= 1;
        div--;

        esdhc_clock_enable(host, false);

        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~ESDHC_CLOCK_MASK;
        temp |= ((div << ESDHC_DIVIDER_SHIFT) |
                 (pre_div << ESDHC_PREDIV_SHIFT));
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

        /*
         * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
         * wait for the clock stable bit, which does not exist.
         */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }

        /* Additional settings for HS400. */
        if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
            clock == MMC_HS200_MAX_DTR) {
                temp = sdhci_readl(host, ESDHC_TBCTL);
                sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
                temp = sdhci_readl(host, ESDHC_SDCLKCTL);
                sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
                esdhc_clock_enable(host, true);

                temp = sdhci_readl(host, ESDHC_DLLCFG0);
                temp |= ESDHC_DLL_ENABLE;
                if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
                        temp |= ESDHC_DLL_FREQ_SEL;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);

                temp |= ESDHC_DLL_RESET;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);
                temp &= ~ESDHC_DLL_RESET;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);

                /* Wait max 20 ms */
                if (read_poll_timeout(sdhci_readl, temp,
                                      temp & ESDHC_DLL_STS_SLV_LOCK,
                                      10, 20000, false,
                                      host, ESDHC_DLLSTAT0))
                        pr_err("%s: timeout for delay chain lock.\n",
                               mmc_hostname(host->mmc));

                temp = sdhci_readl(host, ESDHC_TBCTL);
                sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

                esdhc_clock_enable(host, false);
                esdhc_flush_async_fifo(host);
        }
        esdhc_clock_enable(host, true);
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
        u32 ctrl;

        ctrl = sdhci_readl(host, ESDHC_PROCTL);
        ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
        switch (width) {
        case MMC_BUS_WIDTH_8:
                ctrl |= ESDHC_CTRL_8BITBUS;
                break;

        case MMC_BUS_WIDTH_4:
                ctrl |= ESDHC_CTRL_4BITBUS;
                break;

        default:
                break;
        }

        sdhci_writel(host, ctrl, ESDHC_PROCTL);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 val, bus_width = 0;

        /*
         * Add a delay to make sure all the DMA transfers are finished
         * for the quirk.
         */
        if (esdhc->quirk_delay_before_data_reset &&
            (mask & SDHCI_RESET_DATA) &&
            (host->flags & SDHCI_REQ_USE_DMA))
                mdelay(5);

        /*
         * Save the bus-width for eSDHC whose vendor version is 2.2
         * or lower for data reset.
         */
        if ((mask & SDHCI_RESET_DATA) &&
            (esdhc->vendor_ver <= VENDOR_V_22)) {
                val = sdhci_readl(host, ESDHC_PROCTL);
                bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
        }

        sdhci_reset(host, mask);

        /*
         * Restore the bus-width setting and interrupt registers for eSDHC
         * whose vendor version is 2.2 or lower for data reset.
         */
        if ((mask & SDHCI_RESET_DATA) &&
            (esdhc->vendor_ver <= VENDOR_V_22)) {
                val = sdhci_readl(host, ESDHC_PROCTL);
                val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
                val |= bus_width;
                sdhci_writel(host, val, ESDHC_PROCTL);

                sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        }

        /*
         * Some bits have to be cleaned manually for eSDHC whose spec
         * version is higher than 3.0 for all reset.
         */
        if ((mask & SDHCI_RESET_ALL) &&
            (esdhc->spec_ver >= SDHCI_SPEC_300)) {
                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_TB_EN;
                sdhci_writel(host, val, ESDHC_TBCTL);

                /*
                 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
                 * 0 for the quirk.
                 */
                if (esdhc->quirk_unreliable_pulse_detection) {
                        val = sdhci_readl(host, ESDHC_DLLCFG1);
                        val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
                        sdhci_writel(host, val, ESDHC_DLLCFG1);
                }
        }
}

/*
 * The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is an
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
        { .compatible = "fsl,t1040-scfg", },
        { .compatible = "fsl,ls1012a-scfg", },
        { .compatible = "fsl,ls1046a-scfg", },
        {}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR       0x408
#define SDHCIOVSELCR_TGLEN      0x80000000
#define SDHCIOVSELCR_VSELVAL    0x60000000
#define SDHCIOVSELCR_SDHC_VS    0x00000001

static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
                                       struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct device_node *scfg_node;
        void __iomem *scfg_base = NULL;
        u32 sdhciovselcr;
        u32 val;

        /*
         * Signal Voltage Switching is only applicable for Host Controllers
         * v3.00 and above.
         */
        if (host->version < SDHCI_SPEC_300)
                return 0;

        val = sdhci_readl(host, ESDHC_PROCTL);

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                val &= ~ESDHC_VOLT_SEL;
                sdhci_writel(host, val, ESDHC_PROCTL);
                return 0;
        case MMC_SIGNAL_VOLTAGE_180:
                scfg_node = of_find_matching_node(NULL, scfg_device_ids);
                if (scfg_node)
                        scfg_base = of_iomap(scfg_node, 0);
                of_node_put(scfg_node);
                if (scfg_base) {
                        sdhciovselcr = SDHCIOVSELCR_TGLEN |
                                       SDHCIOVSELCR_VSELVAL;
                        iowrite32be(sdhciovselcr,
                                    scfg_base + SCFG_SDHCIOVSELCR);

                        val |= ESDHC_VOLT_SEL;
                        sdhci_writel(host, val, ESDHC_PROCTL);

                        sdhciovselcr = SDHCIOVSELCR_TGLEN |
                                       SDHCIOVSELCR_SDHC_VS;
                        iowrite32be(sdhciovselcr,
                                    scfg_base + SCFG_SDHCIOVSELCR);
                        iounmap(scfg_base);
                } else {
                        val |= ESDHC_VOLT_SEL;
                        sdhci_writel(host, val, ESDHC_PROCTL);
                }
                return 0;
        default:
                return 0;
        }
}

static struct soc_device_attribute soc_tuning_erratum_type1[] = {
        { .family = "QorIQ T1023", },
        { .family = "QorIQ T1040", },
        { .family = "QorIQ T2080", },
        { .family = "QorIQ LS1021A", },
        { },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
        { .family = "QorIQ LS1012A", },
        { .family = "QorIQ LS1043A", },
        { .family = "QorIQ LS1046A", },
        { .family = "QorIQ LS1080A", },
        { .family = "QorIQ LS2080A", },
        { .family = "QorIQ LA1575A", },
        { },
};

static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
        u32 val;

        esdhc_clock_enable(host, false);
        esdhc_flush_async_fifo(host);

        val = sdhci_readl(host, ESDHC_TBCTL);
        if (enable)
                val |= ESDHC_TB_EN;
        else
                val &= ~ESDHC_TB_EN;
        sdhci_writel(host, val, ESDHC_TBCTL);

        esdhc_clock_enable(host, true);
}

static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
{
        u32 val;

        /* Write TBCTL[11:8]=4'h8 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~(0xf << 8);
        val |= 8 << 8;
        sdhci_writel(host, val, ESDHC_TBCTL);

        /* Read the TBCTL[31:0] register and rewrite it again */
        val = sdhci_readl(host, ESDHC_TBCTL);
        sdhci_writel(host, val, ESDHC_TBCTL);

        /* Read the TBSTAT[31:0] register twice */
        val = sdhci_readl(host, ESDHC_TBSTAT);
        val = sdhci_readl(host, ESDHC_TBSTAT);

        *window_end = val & 0xff;
        *window_start = (val >> 8) & 0xff;
}
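
/*
 * TBSTAT reports the tuning window seen by the tuning block: bits 15:8 hold
 * the window start pointer and bits 7:0 the window end pointer.
 * esdhc_prepare_sw_tuning() below compares the two values to decide how wide
 * a window to program for software tuning.
 */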

static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u8 start_ptr, end_ptr;

        if (esdhc->quirk_tuning_erratum_type1) {
                *window_start = 5 * esdhc->div_ratio;
                *window_end = 3 * esdhc->div_ratio;
                return;
        }

        esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

        /* Reset the data lines by setting ESDHCCTL[RSTD] */
        sdhci_reset(host, SDHCI_RESET_DATA);
        /* Write 32'hFFFF_FFFF to the IRQSTAT register */
        sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

        /*
         * If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
         * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
         * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
         * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
         */
        if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
                *window_start = 8 * esdhc->div_ratio;
                *window_end = 4 * esdhc->div_ratio;
        } else {
                *window_start = 5 * esdhc->div_ratio;
                *window_end = 3 * esdhc->div_ratio;
        }
}

static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
                                   u8 window_start, u8 window_end)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 val;
        int ret;

        /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
        val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
              ESDHC_WNDW_STRT_PTR_MASK;
        val |= window_end & ESDHC_WNDW_END_PTR_MASK;
        sdhci_writel(host, val, ESDHC_TBPTR);

        /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~ESDHC_TB_MODE_MASK;
        val |= ESDHC_TB_MODE_SW;
        sdhci_writel(host, val, ESDHC_TBCTL);

        esdhc->in_sw_tuning = true;
        ret = sdhci_execute_tuning(mmc, opcode);
        esdhc->in_sw_tuning = false;
        return ret;
}
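
/*
 * SW tuning flow: the window programmed into TBPTR bounds the sampling
 * points the tuning block tries, TBCTL[TB_MODE] selects software tuning,
 * and the standard sdhci_execute_tuning() loop is then reused. While
 * in_sw_tuning is set, the writew hooks above insert the ESDHC_SMPCLKSEL
 * step that is required once ESDHC_EXTN gets set during the procedure.
 */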

static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u8 window_start, window_end;
        int ret, retries = 1;
        bool hs400_tuning;
        unsigned int clk;
        u32 val;

        /*
         * For tuning mode, the SD clock divisor value
         * must be larger than 3 according to the reference manual.
         */
        clk = esdhc->peripheral_clock / 3;
        if (host->clock > clk)
                esdhc_of_set_clock(host, clk);

        esdhc_tuning_block_enable(host, true);

        /*
         * The eSDHC controller takes the data timeout value into account
         * during tuning. If the SD card is too slow sending the response, the
         * timer will expire and a "Buffer Read Ready" interrupt without data
         * is triggered. This leads to tuning errors.
         *
         * Just set the timeout to the maximum value because the core will
         * already take care of it in sdhci_send_tuning().
         */
        sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

        hs400_tuning = host->flags & SDHCI_HS400_TUNING;

        do {
                if (esdhc->quirk_limited_clk_division &&
                    hs400_tuning)
                        esdhc_of_set_clock(host, host->clock);

                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_TB_MODE_MASK;
                val |= ESDHC_TB_MODE_3;
                sdhci_writel(host, val, ESDHC_TBCTL);

                ret = sdhci_execute_tuning(mmc, opcode);
                if (ret)
                        break;

                /*
                 * For type2 affected platforms of the tuning erratum,
                 * tuning may succeed although eSDHC might not have
                 * tuned properly. Need to check the tuning window.
                 */
                if (esdhc->quirk_tuning_erratum_type2 &&
                    !host->tuning_err) {
                        esdhc_tuning_window_ptr(host, &window_start,
                                                &window_end);
                        if (abs(window_start - window_end) >
                            (4 * esdhc->div_ratio + 2))
                                host->tuning_err = -EAGAIN;
                }

                /*
                 * If HW tuning fails and triggers the erratum,
                 * try the workaround.
                 */
                ret = host->tuning_err;
                if (ret == -EAGAIN &&
                    (esdhc->quirk_tuning_erratum_type1 ||
                     esdhc->quirk_tuning_erratum_type2)) {
                        /* Recover the HS400 tuning flag */
                        if (hs400_tuning)
                                host->flags |= SDHCI_HS400_TUNING;
                        pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
                                mmc_hostname(mmc));
                        esdhc_prepare_sw_tuning(host, &window_start,
                                                &window_end);
                        ret = esdhc_execute_sw_tuning(mmc, opcode,
                                                      window_start,
                                                      window_end);
                        if (ret)
                                break;

                        /* Retry both HW/SW tuning with reduced clock. */
                        ret = host->tuning_err;
                        if (ret == -EAGAIN && retries) {
                                /* Recover the HS400 tuning flag */
                                if (hs400_tuning)
                                        host->flags |= SDHCI_HS400_TUNING;

                                clk = host->max_clk / (esdhc->div_ratio + 1);
                                esdhc_of_set_clock(host, clk);
                                pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
                                        mmc_hostname(mmc));
                        } else {
                                break;
                        }
                } else {
                        break;
                }
        } while (retries--);

        if (ret) {
                esdhc_tuning_block_enable(host, false);
        } else if (hs400_tuning) {
                val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
                val |= ESDHC_FLW_CTL_BG;
                sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
        }

        return ret;
}

static void esdhc_set_uhs_signaling(struct sdhci_host *host,
                                    unsigned int timing)
{
        u32 val;

        /*
         * There are specific register settings for HS400 mode.
         * Clean all of them up if the controller is in HS400 mode, to
         * exit HS400 mode before re-setting any speed mode.
         */
        val = sdhci_readl(host, ESDHC_TBCTL);
        if (val & ESDHC_HS400_MODE) {
                val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
                val &= ~ESDHC_FLW_CTL_BG;
                sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

                val = sdhci_readl(host, ESDHC_SDCLKCTL);
                val &= ~ESDHC_CMD_CLK_CTL;
                sdhci_writel(host, val, ESDHC_SDCLKCTL);

                esdhc_clock_enable(host, false);
                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_HS400_MODE;
                sdhci_writel(host, val, ESDHC_TBCTL);
                esdhc_clock_enable(host, true);

                val = sdhci_readl(host, ESDHC_DLLCFG0);
                val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
                sdhci_writel(host, val, ESDHC_DLLCFG0);

                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_HS400_WNDW_ADJUST;
                sdhci_writel(host, val, ESDHC_TBCTL);

                esdhc_tuning_block_enable(host, false);
        }

        if (timing == MMC_TIMING_MMC_HS400)
                esdhc_tuning_block_enable(host, true);
        else
                sdhci_set_uhs_signaling(host, timing);
}

static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 command;

        if (esdhc->quirk_trans_complete_erratum) {
                command = SDHCI_GET_CMD(sdhci_readw(host,
                                                    SDHCI_COMMAND));
                if (command == MMC_WRITE_MULTIPLE_BLOCK &&
                    sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
                    intmask & SDHCI_INT_DATA_END) {
                        intmask &= ~SDHCI_INT_DATA_END;
                        sdhci_writel(host, SDHCI_INT_DATA_END,
                                     SDHCI_INT_STATUS);
                }
        }
        return intmask;
}
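
/*
 * quirk_trans_complete_erratum is set for "fsl,p2020-esdhc" in esdhc_init():
 * when a multi-block write (CMD25) still has a non-zero block count, the
 * handler above drops SDHCI_INT_DATA_END from the interrupt mask and clears
 * it in IRQSTAT, so the core does not treat the transfer as complete
 * prematurely.
 */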

#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

        if (host->tuning_mode != SDHCI_TUNING_MODE_3)
                mmc_retune_needed(host->mmc);

        return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);
        int ret = sdhci_resume_host(host);

        if (ret == 0) {
                /* Isn't this already done by sdhci_resume_host() ? --rmk */
                esdhc_of_enable_dma(host);
                sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
        }
        return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
                         esdhc_of_suspend,
                         esdhc_of_resume);

static const struct sdhci_ops sdhci_esdhc_be_ops = {
        .read_l = esdhc_be_readl,
        .read_w = esdhc_be_readw,
        .read_b = esdhc_be_readb,
        .write_l = esdhc_be_writel,
        .write_w = esdhc_be_writew,
        .write_b = esdhc_be_writeb,
        .set_clock = esdhc_of_set_clock,
        .enable_dma = esdhc_of_enable_dma,
        .get_max_clock = esdhc_of_get_max_clock,
        .get_min_clock = esdhc_of_get_min_clock,
        .adma_workaround = esdhc_of_adma_workaround,
        .set_bus_width = esdhc_pltfm_set_bus_width,
        .reset = esdhc_reset,
        .set_uhs_signaling = esdhc_set_uhs_signaling,
        .irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
        .read_l = esdhc_le_readl,
        .read_w = esdhc_le_readw,
        .read_b = esdhc_le_readb,
        .write_l = esdhc_le_writel,
        .write_w = esdhc_le_writew,
        .write_b = esdhc_le_writeb,
        .set_clock = esdhc_of_set_clock,
        .enable_dma = esdhc_of_enable_dma,
        .get_max_clock = esdhc_of_get_max_clock,
        .get_min_clock = esdhc_of_get_min_clock,
        .adma_workaround = esdhc_of_adma_workaround,
        .set_bus_width = esdhc_pltfm_set_bus_width,
        .reset = esdhc_reset,
        .set_uhs_signaling = esdhc_set_uhs_signaling,
        .irq = esdhc_irq,
};

static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
        .quirks = ESDHC_DEFAULT_QUIRKS |
                  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
                  SDHCI_QUIRK_NO_CARD_NO_RESET |
                  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
        .ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
        .quirks = ESDHC_DEFAULT_QUIRKS |
                  SDHCI_QUIRK_NO_CARD_NO_RESET |
                  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
        .ops = &sdhci_esdhc_le_ops,
};
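
/*
 * The BE and LE variants of the ops/pdata above differ only in the raw MMIO
 * accessors they use (ioread32be/iowrite32be vs ioread32/iowrite32); the
 * probe routine picks one of the two based on the "little-endian"
 * device-tree property.
 */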

static struct soc_device_attribute soc_incorrect_hostver[] = {
        { .family = "QorIQ T4240", .revision = "1.0", },
        { .family = "QorIQ T4240", .revision = "2.0", },
        { },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
        { .family = "QorIQ LX2160A", .revision = "1.0", },
        { .family = "QorIQ LX2160A", .revision = "2.0", },
        { .family = "QorIQ LS1028A", .revision = "1.0", },
        { },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
        { .family = "QorIQ LX2160A", .revision = "1.0", },
        { .family = "QorIQ LX2160A", .revision = "2.0", },
        { .family = "QorIQ LS1028A", .revision = "1.0", },
        { },
};

static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
        const struct of_device_id *match;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_esdhc *esdhc;
        struct device_node *np;
        struct clk *clk;
        u32 val;
        u16 host_ver;

        pltfm_host = sdhci_priv(host);
        esdhc = sdhci_pltfm_priv(pltfm_host);

        host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
        esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
                             SDHCI_VENDOR_VER_SHIFT;
        esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
        if (soc_device_match(soc_incorrect_hostver))
                esdhc->quirk_incorrect_hostver = true;
        else
                esdhc->quirk_incorrect_hostver = false;

        if (soc_device_match(soc_fixup_sdhc_clkdivs))
                esdhc->quirk_limited_clk_division = true;
        else
                esdhc->quirk_limited_clk_division = false;

        if (soc_device_match(soc_unreliable_pulse_detection))
                esdhc->quirk_unreliable_pulse_detection = true;
        else
                esdhc->quirk_unreliable_pulse_detection = false;

        match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
        if (match)
                esdhc->clk_fixup = match->data;
        np = pdev->dev.of_node;

        if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
                esdhc->quirk_delay_before_data_reset = true;
                esdhc->quirk_trans_complete_erratum = true;
        }

        clk = of_clk_get(np, 0);
        if (!IS_ERR(clk)) {
                /*
                 * esdhc->peripheral_clock is assigned the eSDHC base clock
                 * when the peripheral clock is used. For some platforms,
                 * the clock value obtained from the common clk API is the
                 * peripheral clock, while the eSDHC base clock is
                 * 1/2 of the peripheral clock.
                 */
                if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
                    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
                    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
                        esdhc->peripheral_clock = clk_get_rate(clk) / 2;
                else
                        esdhc->peripheral_clock = clk_get_rate(clk);

                clk_put(clk);
        }

        esdhc_clock_enable(host, false);
        val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
        /*
         * This bit cannot be reset by SDHCI_RESET_ALL. It needs to be
         * initialized to 1 or 0 once, to override a different value
         * which may have been configured by the bootloader.
         */
        if (esdhc->peripheral_clock)
                val |= ESDHC_PERIPHERAL_CLK_SEL;
        else
                val &= ~ESDHC_PERIPHERAL_CLK_SEL;
        sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
        esdhc_clock_enable(host, true);
}

static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
        esdhc_tuning_block_enable(mmc_priv(mmc), false);
        return 0;
}

static int sdhci_esdhc_probe(struct platform_device *pdev)
{
        struct sdhci_host *host;
        struct device_node *np, *tp;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_esdhc *esdhc;
        int ret;

        np = pdev->dev.of_node;

        if (of_property_read_bool(np, "little-endian"))
                host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
                                        sizeof(struct sdhci_esdhc));
        else
                host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
                                        sizeof(struct sdhci_esdhc));

        if (IS_ERR(host))
                return PTR_ERR(host);

        host->mmc_host_ops.start_signal_voltage_switch =
                esdhc_signal_voltage_switch;
        host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
        host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
        host->tuning_delay = 1;

        esdhc_init(pdev, host);

        sdhci_get_of_property(pdev);

        pltfm_host = sdhci_priv(host);
        esdhc = sdhci_pltfm_priv(pltfm_host);
        if (soc_device_match(soc_tuning_erratum_type1))
                esdhc->quirk_tuning_erratum_type1 = true;
        else
                esdhc->quirk_tuning_erratum_type1 = false;

        if (soc_device_match(soc_tuning_erratum_type2))
                esdhc->quirk_tuning_erratum_type2 = true;
        else
                esdhc->quirk_tuning_erratum_type2 = false;

        if (esdhc->vendor_ver == VENDOR_V_22)
                host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

        if (esdhc->vendor_ver > VENDOR_V_22)
                host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

        tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
        if (tp) {
                of_node_put(tp);
                host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
                host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
        }

        if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
            of_device_is_compatible(np, "fsl,p5020-esdhc") ||
            of_device_is_compatible(np, "fsl,p4080-esdhc") ||
            of_device_is_compatible(np, "fsl,p1020-esdhc") ||
            of_device_is_compatible(np, "fsl,t1040-esdhc"))
                host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

        if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
                host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

        esdhc->quirk_ignore_data_inhibit = false;
        if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
                /*
                 * Freescale messed up with P2020 as it has a non-standard
                 * host control register.
                 */
                host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
                esdhc->quirk_ignore_data_inhibit = true;
        }

        /* call the generic mmc_of_parse to support additional capabilities */
        ret = mmc_of_parse(host->mmc);
        if (ret)
                goto err;

        mmc_of_parse_voltage(host->mmc, &host->ocr_mask);

        ret = sdhci_add_host(host);
        if (ret)
                goto err;

        return 0;
 err:
        sdhci_pltfm_free(pdev);
        return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
        .driver = {
                .name = "sdhci-esdhc",
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
                .of_match_table = sdhci_esdhc_of_match,
                .pm = &esdhc_of_dev_pm_ops,
        },
        .probe = sdhci_esdhc_probe,
        .remove = sdhci_pltfm_remove,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
              "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");