// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *	    Anton Vorontsov <avorontsov@ru.mvista.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

#define VENDOR_V_22	0x12
#define VENDOR_V_23	0x13

#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
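
/*
 * Note: max_clk[] is indexed by the MMC timing mode in use. Entries left at
 * zero mean no platform-specific cap; esdhc_of_set_clock() then falls back
 * to the clock rate requested by the MMC core.
 */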

static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk },
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk },
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk },
	{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk },
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);

struct sdhci_esdhc {
	u8 vendor_ver;
	u8 spec_ver;
	bool quirk_incorrect_hostver;
	bool quirk_limited_clk_division;
	bool quirk_unreliable_pulse_detection;
	bool quirk_tuning_erratum_type1;
	bool quirk_tuning_erratum_type2;
	bool quirk_ignore_data_inhibit;
	bool quirk_delay_before_data_reset;
	bool in_sw_tuning;
	unsigned int peripheral_clock;
	const struct esdhc_clk_fixup *clk_fixup;
	u32 div_ratio;
};

/**
 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
 *		       to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
			     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 ret;

	/*
	 * The ADMA capability bit in eSDHC is not compatible with the
	 * standard SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2
	 * when ADMA is supported by eSDHC.
	 * For many FSL eSDHC controllers, the reset value of the
	 * SDHCI_CAN_DO_ADMA1 field is 1, but some of them can't support
	 * ADMA; only those whose vendor version is greater than 2.2/0x12
	 * support ADMA.
	 */
	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
		if (esdhc->vendor_ver > VENDOR_V_22) {
			ret = value | SDHCI_CAN_DO_ADMA2;
			return ret;
		}
	}
	/*
	 * The DAT[3:0] line signal levels and the CMD line signal level are
	 * not compatible with the standard SDHC register. The line signal
	 * levels DAT[7:0] are at bits 31:24 and the command line signal
	 * level is at bit 23. All other bits are the same as in the
	 * standard SDHC register.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE) {
		ret = value & 0x000fffff;
		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
		ret |= (value << 1) & SDHCI_CMD_LVL;
		return ret;
	}

	/*
	 * DTS properties of the mmc host are used to enable each speed mode
	 * according to SoC and board capability. So clear the
	 * SDR50/SDR104/DDR50 support bits here.
	 */
	if (spec_reg == SDHCI_CAPABILITIES_1) {
		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
				SDHCI_SUPPORT_DDR50);
		return ret;
	}

	/*
	 * Some controllers have an unreliable Data Line Active bit for
	 * commands with a busy signal, which affects the Command Inhibit
	 * (data) bit. Just ignore it, since the MMC core driver has already
	 * polled the card status with CMD13 after any command with a busy
	 * signal.
	 */
	if ((spec_reg == SDHCI_PRESENT_STATE) &&
	    (esdhc->quirk_ignore_data_inhibit == true)) {
		ret = value & ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	ret = value;
	return ret;
}
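
/*
 * Example of the PRESENT_STATE remapping above (a sketch, assuming the
 * standard SDHCI layout with DAT[3:0] levels at bits 23:20 and the CMD
 * level at bit 24): eSDHC reports DAT[3:0] at bits 27:24, so "value >> 4"
 * moves them into SDHCI_DATA_LVL_MASK, and eSDHC reports the CMD level at
 * bit 23, so "value << 1" moves it onto SDHCI_CMD_LVL.
 */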

static u16 esdhc_readw_fixup(struct sdhci_host *host,
			     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u16 ret;
	int shift = (spec_reg & 0x2) * 8;

	if (spec_reg == SDHCI_TRANSFER_MODE)
		return pltfm_host->xfer_mode_shadow;

	if (spec_reg == SDHCI_HOST_VERSION)
		ret = value & 0xffff;
	else
		ret = (value >> shift) & 0xffff;
	/*
	 * Workaround for T4240-R1.0-R2.0 eSDHC which reports incorrect
	 * vendor version and spec version information.
	 */
	if ((spec_reg == SDHCI_HOST_VERSION) &&
	    (esdhc->quirk_incorrect_hostver))
		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
	return ret;
}

static u8 esdhc_readb_fixup(struct sdhci_host *host,
			    int spec_reg, u32 value)
{
	u8 ret;
	u8 dma_bits;
	int shift = (spec_reg & 0x3) * 8;

	ret = (value >> shift) & 0xff;

	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
		/* fixup the result */
		ret &= ~SDHCI_CTRL_DMA_MASK;
		ret |= dma_bits;
	}
	return ret;
}

/**
 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
 *			written into eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value on spec_reg address
 *
 * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
 * registers are 32 bits. There are differences in register size, register
 * address, register function, bit position and function between eSDHC spec
 * and SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
			      int spec_reg, u32 value, u32 old_value)
{
	u32 ret;

	/*
	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
	 * when SYSCTL[RSTD] is set for some special operations.
	 * It has no impact on other operations.
	 */
	if (spec_reg == SDHCI_INT_ENABLE)
		ret = value | SDHCI_INT_BLK_GAP;
	else
		ret = value;

	return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
			      int spec_reg, u16 value, u32 old_value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int shift = (spec_reg & 0x2) * 8;
	u32 ret;

	switch (spec_reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write; we must do it together with the
		 * command write that is down below. Return the old value.
		 */
		pltfm_host->xfer_mode_shadow = value;
		return old_value;
	case SDHCI_COMMAND:
		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
		return ret;
	}

	ret = old_value & (~(0xffff << shift));
	ret |= (value << shift);

	if (spec_reg == SDHCI_BLOCK_SIZE) {
		/*
		 * The two last DMA bits are reserved, and the first one is
		 * used for a non-standard blksz of 4096 bytes that we don't
		 * support yet. So clear the DMA boundary bits.
		 */
		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
	}
	return ret;
}
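
/*
 * The SDHCI_TRANSFER_MODE/SDHCI_COMMAND pair shares one 32-bit eSDHC
 * register (command in the upper halfword, transfer mode in the lower), so
 * the transfer-mode write is buffered in xfer_mode_shadow and only reaches
 * the hardware when the command halfword is written.
 */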

static u32 esdhc_writeb_fixup(struct sdhci_host *host,
			      int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If the host control register is not standard, exit
		 * this function.
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}

static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32be(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}
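
/*
 * All of the accessors above and below work on the aligned 32-bit word that
 * contains the requested register, since all eSDHC registers are 32 bits
 * wide; the *_fixup() helpers then extract or merge the 8/16-bit field the
 * SDHCI core asked for.
 */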

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/*
	 * Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/*
	 * Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32(ret, host->ioaddr + base);
}

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		 SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
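
/*
 * A sketch of the boundary math above, assuming the sdhci default DMA
 * boundary of 512 KiB (SDHCI_DEFAULT_BOUNDARY_SIZE): a transfer that stopped
 * partway through a block-gap region is rounded up so that the system
 * address written back to SDHCI_DMA_ADDRESS sits on the following 512 KiB
 * boundary, which is where the controller resumes the transfer.
 */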

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}

static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

	if (esdhc->peripheral_clock)
		return esdhc->peripheral_clock;
	else
		return pltfm_host->clock;
}

static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int clock;

	if (esdhc->peripheral_clock)
		clock = esdhc->peripheral_clock;
	else
		clock = pltfm_host->clock;
	return clock / 256 / 16;
}
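
/*
 * The minimum SD clock corresponds to the largest divider the controller
 * can program: a prescaler of 256 times a divisor of 16, i.e. the base
 * clock divided by 4096 (matching the limits used in esdhc_of_set_clock()
 * below).
 */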

static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * The IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
	 * is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timeout.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}

static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int clock_fixup = 0;
	ktime_t timeout;
	u32 temp;

	if (clock == 0) {
		host->mmc->actual_clock = 0;
		esdhc_clock_enable(host, false);
		return;
	}

	/* Start pre_div at 2 for vendor version < 2.3. */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Fix clock value. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (clock_fixup == 0 || clock < clock_fixup)
		clock_fixup = clock;

	/* Calculate pre_div and div. */
	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
		div++;

	esdhc->div_ratio = pre_div * div;

	/* Limit clock division for the HS400 200MHz clock for the quirk. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		if (esdhc->div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (esdhc->div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (esdhc->div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
		esdhc->div_ratio = pre_div * div;
	}

	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	/* Set the clock division into the register. */
	pre_div >>= 1;
	div--;

	esdhc_clock_enable(host, false);

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~ESDHC_CLOCK_MASK;
	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
		 (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
	 * wait for the clock stable bit, which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}

	/* Additional setting for HS400. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		esdhc_flush_async_fifo(host);
	}
	esdhc_clock_enable(host, true);
}
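
/*
 * Divider example for the calculation above (illustrative numbers only,
 * assuming vendor version >= 2.3 so pre_div starts at 1): with a 400 MHz
 * base clock and a 50 MHz request, the prescaler loop leaves pre_div at 1
 * (400 / 1 / 16 = 25 MHz is already below the target) and the divisor loop
 * stops at div = 8, giving div_ratio = 8 and an actual clock of 50 MHz.
 * The register is then programmed with pre_div >> 1 and div - 1, which is
 * how the hardware encodes the two fields.
 */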

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	ctrl = sdhci_readl(host, ESDHC_PROCTL);
	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl |= ESDHC_CTRL_8BITBUS;
		break;

	case MMC_BUS_WIDTH_4:
		ctrl |= ESDHC_CTRL_4BITBUS;
		break;

	default:
		break;
	}

	sdhci_writel(host, ctrl, ESDHC_PROCTL);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add a delay to make sure all the DMA transfers are finished
	 * for the quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save the bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore the bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleared manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for the quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}

/*
 * The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is an
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001
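
/*
 * Outline of the 1.8V switch below: start the SCFG-driven transition
 * (TGLEN | VSELVAL), select the low-voltage pads in the eSDHC Protocol
 * Control register (ESDHC_VOLT_SEL), give the rail time to settle, then
 * latch the final state with TGLEN | SDHC_VS. On platforms without a
 * matching SCFG node only the ESDHC_VOLT_SEL bit is set.
 */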

static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		if (scfg_base) {
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			mdelay(5);

			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				    scfg_base + SCFG_SDHCIOVSELCR);

			iounmap(scfg_base);
		} else {
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}

static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ },
};

static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
	u32 val;

	esdhc_clock_enable(host, false);
	esdhc_flush_async_fifo(host);

	val = sdhci_readl(host, ESDHC_TBCTL);
	if (enable)
		val |= ESDHC_TB_EN;
	else
		val &= ~ESDHC_TB_EN;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc_clock_enable(host, true);
}

static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(2);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(2);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}

static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/*
	 * If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */
	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}

static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}
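
/*
 * While in_sw_tuning is set, the esdhc_*_writew() accessors above detect the
 * ESDHC_EXTN 0->1 transition in System Control 2 and insert the required
 * 1us delay before setting ESDHC_SMPCLKSEL, which is what actually starts
 * the software tuning sequence.
 */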

static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/*
	 * For tuning mode, the SD clock divisor value must be larger than 3
	 * according to the reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/*
		 * For type2 affected platforms of the tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check the tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/*
		 * If HW tuning fails and triggers the erratum,
		 * try the SW tuning workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}
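
/*
 * Summary of the flow above: hardware tuning runs first; if it trips the
 * tuning erratum (-EAGAIN), software tuning is attempted with a window
 * derived from TBSTAT, and if that still reports -EAGAIN one more pass is
 * made at a clock reduced to max_clk / (div_ratio + 1).
 */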

static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				    unsigned int timing)
{
	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}

static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	if (of_find_compatible_node(NULL, NULL,
				    "fsl,p2020-esdhc")) {
		command = SDHCI_GET_CMD(sdhci_readw(host,
						    SDHCI_COMMAND));
		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
		    sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
		    intmask & SDHCI_INT_DATA_END) {
			intmask &= ~SDHCI_INT_DATA_END;
			sdhci_writel(host, SDHCI_INT_DATA_END,
				     SDHCI_INT_STATUS);
		}
	}
	return intmask;
}

#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			 esdhc_of_suspend,
			 esdhc_of_resume);

static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};

static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ },
};

static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			    SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
		esdhc->quirk_delay_before_data_reset = true;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock holds the eSDHC base clock when
		 * the peripheral clock is used. On some platforms the rate
		 * reported by the common clk API is the peripheral clock,
		 * while the eSDHC base clock is 1/2 of that peripheral
		 * clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	if (esdhc->peripheral_clock) {
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		val |= ESDHC_PERIPHERAL_CLK_SEL;
		sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
		esdhc_clock_enable(host, true);
	}
}
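
/*
 * Once peripheral_clock is known, it becomes the reference that
 * esdhc_of_get_max_clock()/esdhc_of_get_min_clock() report to the SDHCI
 * core, and ESDHC_PERIPHERAL_CLK_SEL switches the controller onto that
 * clock source.
 */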

static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}

static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register.
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(np, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;

err:
	sdhci_pltfm_free(pdev);
	return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");