// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
#define TUNING_WORD_BIT_SIZE				32

#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

#define SDHCI_TEGRA_CIF2AXI_CTRL_0			0x1fc

#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
/*
 * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads.
 */
#define NVQUIRK_HAS_PADCALIB				BIT(6)
/*
 * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 * on the signaling mode.
 */
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/*
 * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
 * SDMMC hardware data timeout.
 */
#define NVQUIRK_HAS_TMCLK				BIT(10)

#define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
#define NVQUIRK_PROGRAM_STREAMID			BIT(12)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000

#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
					 SDHCI_TRNS_BLK_CNT_EN | \
					 SDHCI_TRNS_DMA)

struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;
	u64 dma_mask;
	u32 nvquirks;
	u8 min_tap_delay;
	u8 max_tap_delay;
};

/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};

struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;
	bool ddr_signaling;
	bool pad_calib_required;
	bool pad_control_available;

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;

	u32 default_tap;
	u32 default_trim;
	u32 dqs_trim;
	bool enable_hwcq;
	unsigned long curr_clk_rate;
	u8 tuned_tap_delay;
	u32 stream_id;
};

static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
			(reg == SDHCI_HOST_VERSION))) {
		/* Erratum: Version register is invalid in HW. */
		return SDHCI_SPEC_200;
	}

	return readw(host->ioaddr + reg);
}

static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
			host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}

static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/* Seems like we're getting spurious timeout and crc errors, so
	 * disable signalling of them. In case of real errors software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT | SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);

		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	bool status;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	status = !!(reg & SDHCI_CLOCK_CARD_EN);

	if (status == enable)
		return status;

	if (enable)
		reg |= SDHCI_CLOCK_CARD_EN;
	else
		reg &= ~SDHCI_CLOCK_CARD_EN;

	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);

	return status;
}

static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	bool is_tuning_cmd = 0;
	bool clk_enabled;

	if (reg == SDHCI_COMMAND)
		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}

static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}

static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int has_1v8, has_3v3;

	/*
	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
	 * voltage configuration in order to perform voltage switching. This
	 * means that valid pinctrl info is required on SDHCI instances capable
	 * of performing voltage switching. Whether or not an SDHCI instance is
	 * capable of voltage switching is determined based on the regulator.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return true;

	if (IS_ERR(host->mmc->supply.vqmmc))
		return false;

	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 1700000, 1950000);

	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 2700000, 3600000);

	if (has_1v8 == 1 && has_3v3 == 1)
		return tegra_host->pad_control_available;

	/* Fixed voltage, no pad control required. */
	return true;
}

static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}

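/*
 * Illustrative example (not from the TRM): with tap = 0x10 the
 * read-modify-write above clears only bits 23:16 of VENDOR_CLOCK_CTRL
 * and writes 0x10 << 16 into them, so the TRIM field and the override
 * bits programmed at reset time are left untouched.
 */
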
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_and_cqhci_reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}

static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 val;

	/*
	 * Enable or disable the additional I/O pad used by the drive strength
	 * calibration process.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		usleep_range(1, 2);
}

static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
					       u16 pdpu)
{
	u32 reg;

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
	reg |= pdpu;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}

static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
			&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			reg = sdhci_readl(host,
					  SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
				     SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}
	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						   tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}

static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}

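/*
 * Worked example of the pdpu packing above (illustrative values only):
 * a pull-down offset of 0x0a and a pull-up offset of 0x05 give
 * pdpu = 0x0a << 8 | 0x05 = 0x0a05, which lands in the 16-bit
 * AUTO_CAL_PDPU_OFFSET field of AUTO_CAL_CONFIG via
 * tegra_sdhci_set_pad_autocal_offset().
 */
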
static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading below device tree properties for SoCs that don't
	 * have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(mmc_dev(host->mmc),
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}

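/*
 * Example device-tree fragment consumed by the parser above. The node
 * name and the numeric values are placeholders for illustration, not
 * board recommendations:
 *
 *	sdhci@3400000 {
 *		nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
 *		nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
 *		nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
 *		nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
 *	};
 *
 * Any property that is absent falls back to 0, or to the 1v8 value for
 * the sdr104/hs400 variants, exactly as implemented above.
 */
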
static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);

	/* 100 ms calibration interval is specified in the TRM */
	if (ktime_to_ms(since_calib) > 100) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->last_calib = ktime_get();
	}

	sdhci_request(mmc, mrq);
}

static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int err;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
				       &tegra_host->default_tap);
	if (err)
		tegra_host->default_tap = 0;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
				       &tegra_host->default_trim);
	if (err)
		tegra_host->default_trim = 0;

	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
				       &tegra_host->dqs_trim);
	if (err)
		tegra_host->dqs_trim = 0x11;
}

static void tegra_sdhci_parse_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
		tegra_host->enable_hwcq = true;
	else
		tegra_host->enable_hwcq = false;

	tegra_sdhci_parse_pad_autocal_dt(host);
	tegra_sdhci_parse_tap_and_trim(host);
}

static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct device *dev = mmc_dev(host->mmc);
	unsigned long host_clk;
	int err;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divide the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;

	err = dev_pm_opp_set_rate(dev, host_clk);
	if (err)
		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
			host_clk, err);

	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}

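/*
 * Worked example of the DDR50/52 handling above (illustrative numbers):
 * for a requested 52 MHz DDR52 card clock, host_clk = 2 * 52 MHz =
 * 104 MHz is requested via the OPP/clock framework and host->max_clk is
 * set to 104 MHz, so sdhci_calc_clk() selects a divisor of 2 and the
 * card still sees a 52 MHz clock while data is sampled on both edges.
 */
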
static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe) {
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
		/*
		 * When CMD13 is sent from mmc_select_hs400es() after
		 * switching to HS400ES mode, the bus is operating at
		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
		 * controller CAR clock and the interface clock are rate matched.
		 */
		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
	} else {
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	}

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}

static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}

static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}

static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}

static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out unwanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}
			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}

static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use in case no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and lower bound thresholds used to detect merged and
		 * bubble windows.
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}

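/*
 * Worked example of the threshold math above (illustrative, using the
 * Tegra210 values min_tap_delay = 106 ps and max_tap_delay = 185 ps at
 * a 200 MHz tuning clock): period_ps = 1000000 / 200 = 5000,
 * bestcase = 5000 / 106 = 47 taps, worstcase = 5000 / 185 = 27 taps,
 * thdupper = (2 * 27 + 47) / 2 = 50, thdlower = 27 / 4 = 6,
 * avg_tap_dly = 10000 / 291 = 34 and fixed_tap = 17. Windows wider
 * than 50 taps or narrower than 6 taps are then treated as merged or
 * bubble windows by the correction pass.
 */
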
static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		break;
	case MMC_TIMING_MMC_HS400:
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}

static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	unsigned int min, max;

	/*
	 * Start search for minimum tap value at 10, as smaller values may
	 * wrongly be reported as working but fail at higher speeds,
	 * according to the TRM.
	 */
	min = 10;
	while (min < 255) {
		tegra_sdhci_set_tap(host, min);
		if (!mmc_send_tuning(host->mmc, opcode, NULL))
			break;
		min++;
	}

	/* Find the maximum tap value that still passes. */
	max = min + 1;
	while (max < 255) {
		tegra_sdhci_set_tap(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max--;
			break;
		}
		max++;
	}

	/* The TRM states the ideal tap value is at 75% in the passing range. */
	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));

	return mmc_send_tuning(host->mmc, opcode, NULL);
}

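/*
 * Illustrative example of the 75% rule above: if taps 12..52 pass, then
 * min = 12 and max = 52, and the tap programmed for the final check is
 * 12 + ((52 - 12) * 3 / 4) = 42, i.e. three quarters of the way into
 * the passing window rather than at its midpoint.
 */
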
static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int ret = 0;

	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
		if (ret < 0)
			return ret;
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
		ret = sdhci_start_signal_voltage_switch(mmc, ios);
		if (ret < 0)
			return ret;
		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
	}

	if (tegra_host->pad_calib_required)
		tegra_sdhci_pad_autocalib(host);

	return ret;
}

static int tegra_sdhci_init_pinctrl_info(struct device *dev,
					 struct sdhci_tegra *tegra_host)
{
	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
		dev_dbg(dev, "No pinctrl info, err: %ld\n",
			PTR_ERR(tegra_host->pinctrl_sdmmc));
		return -1;
	}

	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
			tegra_host->pinctrl_state_1v8_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
			tegra_host->pinctrl_state_3v3_drv = NULL;
	}

	tegra_host->pinctrl_state_3v3 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_3v3));
		return -1;
	}

	tegra_host->pinctrl_state_1v8 =
		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
			 PTR_ERR(tegra_host->pinctrl_state_1v8));
		return -1;
	}

	tegra_host->pad_control_available = true;

	return 0;
}

static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		tegra_host->pad_calib_required = true;
}

static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handle the CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quickly, but in case the Tegra CQE
		 * doesn't resume, retry the unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}

static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
	    mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}

static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to the sdhci block
	 * size register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix is to set CBC to 0, allowing the send status command
	 * only when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}

static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	sdhci_dumpregs(mmc_priv(mmc));
}

static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

static void tegra_sdhci_set_timeout(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u32 val;

	/*
	 * HW busy detection timeout is based on programmed data timeout
	 * counter and maximum supported timeout is 11s which may not be
	 * enough for long operations like cache flush, sleep awake, erase.
	 *
	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
	 * host controller to wait for busy state until the card is busy
	 * without HW timeout.
	 *
	 * So, use infinite busy wait mode for operations that may take
	 * more than maximum HW busy timeout of 11s, otherwise use finite
	 * busy wait mode.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	else
		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);

	__sdhci_set_timeout(host, cmd);
}

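/*
 * Example of the cut-over above (illustrative): a SWITCH command with
 * busy_timeout = 30000 ms (e.g. a cache flush) sets ERASE_TIMEOUT_LIMIT
 * so the controller waits for busy release with no HW timeout, while a
 * request with busy_timeout = 5000 ms keeps the finite data-timeout
 * counter, since 5000 < 11 * MSEC_PER_SEC.
 */
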
static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
}

static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
}

static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l = tegra_cqhci_writel,
	.enable = sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
	.pre_enable = sdhci_tegra_cqe_pre_enable,
	.post_disable = sdhci_tegra_cqe_post_disable,
};

static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *platform = sdhci_priv(host);
	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
	struct device *dev = mmc_dev(host->mmc);

	if (soc->dma_mask)
		return dma_set_mask_and_coherent(dev, soc->dma_mask);

	return 0;
}

static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};

static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops = &tegra_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
		    NVQUIRK_HAS_PADCALIB,
};

static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};

static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};

static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra210_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
	.ops = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

static const struct sdhci_tegra_soc_data soc_data_tegra234 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_PROGRAM_STREAMID |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 95,
	.max_tap_delay = 111,
};

static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra234-sdhci", .data = &soc_data_tegra234 },
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);

static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc),
			       sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

/* Program MC streamID for DMA transfers */
static void sdhci_tegra_program_stream_id(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID) {
		tegra_sdhci_writel(host, FIELD_PREP(GENMASK(15, 8), tegra_host->stream_id) |
					 FIELD_PREP(GENMASK(7, 0), tegra_host->stream_id),
					 SDHCI_TEGRA_CIF2AXI_CTRL_0);
	}
}

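/*
 * Illustrative example: with a stream ID of 0x7f the write above
 * programs 0x7f7f into CIF2AXI_CTRL_0, i.e. the same ID in both
 * byte-wide fields (bits 15:8 and bits 7:0) of the register.
 */
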
static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	soc_data = of_device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;

	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* GPIO CD can be set as a wakeup source */
	host->mmc->caps |= MMC_CAP_CD_WAKE;

	tegra_sdhci_parse_dt(host);

	if (tegra_host->soc_data->nvquirks & NVQUIRK_PROGRAM_STREAMID &&
	    !tegra_dev_iommu_get_stream_id(&pdev->dev, &tegra_host->stream_id)) {
		dev_warn(mmc_dev(host->mmc), "missing IOMMU stream ID\n");
		tegra_host->stream_id = 0x7f;
	}

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	/*
	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
	 *
	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
	 * 12 MHz TMCLK which is advertised in host capability register.
	 * With TMCLK of 12 MHz the maximum data timeout period that can
	 * be achieved is 11s, better than using SDCLK for data timeout.
	 *
	 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoC's
	 * supporting separate TMCLK.
	 */

	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
		clk = devm_clk_get(&pdev->dev, "tmclk");
		if (IS_ERR(clk)) {
			rc = PTR_ERR(clk);
			if (rc == -EPROBE_DEFER)
				goto err_power_req;

			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
			clk = NULL;
		}

		clk_set_rate(clk, 12000000);
		rc = clk_prepare_enable(clk);
		if (rc != 0) {
			dev_err(&pdev->dev,
				"failed to enable tmclk: %d\n", rc);
			goto err_power_req;
		}

		tegra_host->tmclk = clk;
	}

	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
				   "failed to get clock\n");
		goto err_clk_get;
	}
	pltfm_host->clk = clk;

	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
							   "sdhci");
	if (IS_ERR(tegra_host->rst)) {
		rc = PTR_ERR(tegra_host->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
		goto err_rst_get;
	}

	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (rc)
		goto err_rst_get;

	pm_runtime_enable(&pdev->dev);
	rc = pm_runtime_resume_and_get(&pdev->dev);
	if (rc)
		goto err_pm_get;

	rc = reset_control_assert(tegra_host->rst);
	if (rc)
		goto err_rst_assert;

	usleep_range(2000, 4000);

	rc = reset_control_deassert(tegra_host->rst);
	if (rc)
		goto err_rst_assert;

	usleep_range(2000, 4000);

	rc = sdhci_tegra_add_host(host);
	if (rc)
		goto err_add_host;

	sdhci_tegra_program_stream_id(host);

	return 0;

err_add_host:
	reset_control_assert(tegra_host->rst);
err_rst_assert:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_pm_get:
	pm_runtime_disable(&pdev->dev);
err_rst_get:
err_clk_get:
	clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
	sdhci_pltfm_free(pdev);
	return rc;
}

static void sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);

	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_force_suspend(&pdev->dev);

	clk_disable_unprepare(tegra_host->tmclk);
	sdhci_pltfm_free(pdev);
}

static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	clk_disable_unprepare(pltfm_host->clk);

	return 0;
}

static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_prepare_enable(pltfm_host->clk);
}

#ifdef CONFIG_PM_SLEEP
static int sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	ret = pm_runtime_force_suspend(dev);
	if (ret) {
		sdhci_resume_host(host);
		cqhci_resume(host->mmc);
		return ret;
	}

	return mmc_gpio_set_cd_wake(host->mmc, true);
}

static int sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = mmc_gpio_set_cd_wake(host->mmc, false);
	if (ret)
		return ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	sdhci_tegra_program_stream_id(host);

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	pm_runtime_force_suspend(dev);
	return ret;
}
#endif

static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
};

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");