// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>

#include "mmci.h"

#define SDMMC_LLI_BUF_LEN	PAGE_SIZE

#define DLYB_CR			0x0
#define DLYB_CR_DEN		BIT(0)
#define DLYB_CR_SEN		BIT(1)

#define DLYB_CFGR		0x4
#define DLYB_CFGR_SEL_MASK	GENMASK(3, 0)
#define DLYB_CFGR_UNIT_MASK	GENMASK(14, 8)
#define DLYB_CFGR_LNG_MASK	GENMASK(27, 16)
#define DLYB_CFGR_LNGF		BIT(31)

#define DLYB_NB_DELAY		11
#define DLYB_CFGR_SEL_MAX	(DLYB_NB_DELAY + 1)
#define DLYB_CFGR_UNIT_MAX	127

#define DLYB_LNG_TIMEOUT_US	1000
#define SDMMC_VSWEND_TIMEOUT_US	10000

#define SYSCFG_DLYBSD_CR	0x0
#define DLYBSD_CR_EN		BIT(0)
#define DLYBSD_CR_RXTAPSEL_MASK	GENMASK(6, 1)
#define DLYBSD_TAPSEL_NB	32
#define DLYBSD_BYP_EN		BIT(16)
#define DLYBSD_BYP_CMD		GENMASK(21, 17)
#define DLYBSD_ANTIGLITCH_EN	BIT(22)

#define SYSCFG_DLYBSD_SR	0x4
#define DLYBSD_SR_LOCK		BIT(0)
#define DLYBSD_SR_RXTAPSEL_ACK	BIT(1)

#define DLYBSD_TIMEOUT_1S_IN_US	1000000

struct sdmmc_lli_desc {
	u32 idmalar;
	u32 idmabase;
	u32 idmasize;
};

struct sdmmc_idma {
	dma_addr_t sg_dma;
	void *sg_cpu;
	dma_addr_t bounce_dma_addr;
	void *bounce_buf;
	bool use_bounce_buffer;
};

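/*
 * Delay block handling is abstracted behind sdmmc_tuning_ops so the same
 * tuning sequence can drive either the STM32MP15 delay block
 * (DLYB_CR/DLYB_CFGR) or the STM32MP25 SYSCFG-based one
 * (SYSCFG_DLYBSD_CR/SR).
 */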
struct sdmmc_dlyb;

struct sdmmc_tuning_ops {
	int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
	void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
	int (*tuning_prepare)(struct mmci_host *host);
	int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit __maybe_unused,
		       int phase, bool sampler __maybe_unused);
};

struct sdmmc_dlyb {
	void __iomem *base;
	struct sdmmc_tuning_ops *ops;
	u32 unit;
	u32 max;
};

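/*
 * Check whether the request can be handled directly by the IDMA: every
 * segment but the last must be 32-bit aligned and a multiple of the variant
 * idmabsize alignment. Otherwise fall back to a single bounce buffer
 * covering the whole request.
 */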
static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct device *dev = mmc_dev(host->mmc);
	struct scatterlist *sg;
	int i;

	/*
	 * The IDMA constrains idmabase & idmasize for each element except
	 * the last one, which has no constraint on idmasize.
	 */
	idma->use_bounce_buffer = false;
	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length,
				host->variant->stm32_idmabsize_align)) {
			dev_dbg(mmc_dev(host->mmc),
				"unaligned scatterlist: ofst:%x length:%d\n",
				data->sg->offset, data->sg->length);
			goto use_bounce_buffer;
		}
	}

	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
		dev_dbg(mmc_dev(host->mmc),
			"unaligned last scatterlist: ofst:%x length:%d\n",
			data->sg->offset, data->sg->length);
		goto use_bounce_buffer;
	}

	return 0;

use_bounce_buffer:
	if (!idma->bounce_buf) {
		idma->bounce_buf = dmam_alloc_coherent(dev,
						       host->mmc->max_req_size,
						       &idma->bounce_dma_addr,
						       GFP_KERNEL);
		if (!idma->bounce_buf) {
			dev_err(dev, "Unable to allocate DMA bounce buffer.\n");
			return -ENOMEM;
		}
	}

	idma->use_bounce_buffer = true;

	return 0;
}

static int _sdmmc_idma_prep_data(struct mmci_host *host,
				 struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_WRITE) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_to_buffer(data->sg, data->sg_len,
					  idma->bounce_buf, xfer_bytes);
			dma_wmb();
		}
	} else {
		int n_elem;

		n_elem = dma_map_sg(mmc_dev(host->mmc),
				    data->sg,
				    data->sg_len,
				    mmc_get_dma_dir(data));

		if (!n_elem) {
			dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int sdmmc_idma_prep_data(struct mmci_host *host,
				struct mmc_data *data, bool next)
{
	/* Check if job is already prepared. */
	if (!next && data->host_cookie == host->next_cookie)
		return 0;

	return _sdmmc_idma_prep_data(host, data);
}

static void sdmmc_idma_unprep_data(struct mmci_host *host,
				   struct mmc_data *data, int err)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_READ) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_from_buffer(data->sg, data->sg_len,
					    idma->bounce_buf, xfer_bytes);
		}
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
	}
}

static int sdmmc_idma_setup(struct mmci_host *host)
{
	struct sdmmc_idma *idma;
	struct device *dev = mmc_dev(host->mmc);

	idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
	if (!idma)
		return -ENOMEM;

	host->dma_priv = idma;

	if (host->variant->dma_lli) {
		idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
						   &idma->sg_dma, GFP_KERNEL);
		if (!idma->sg_cpu) {
			dev_err(dev, "Failed to alloc IDMA descriptor\n");
			return -ENOMEM;
		}
		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
			sizeof(struct sdmmc_lli_desc);
		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;

		host->mmc->max_req_size = SZ_1M;
	} else {
		host->mmc->max_segs = 1;
		host->mmc->max_seg_size = host->mmc->max_req_size;
	}

	dma_set_max_seg_size(dev, host->mmc->max_seg_size);

	return 0;
}

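/*
 * Start the IDMA transfer: single-buffer mode when linked-list DMA is not
 * available, when the scatterlist has a single entry, or when the bounce
 * buffer is used; otherwise build the linked-list descriptors in the
 * coherent sg buffer before enabling the IDMA.
 */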
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct mmc_data *data = host->data;
	struct scatterlist *sg;
	int i;

	host->dma_in_progress = true;

	if (!host->variant->dma_lli || data->sg_len == 1 ||
	    idma->use_bounce_buffer) {
		u32 dma_addr;

		if (idma->use_bounce_buffer)
			dma_addr = idma->bounce_dma_addr;
		else
			dma_addr = sg_dma_address(data->sg);

		writel_relaxed(dma_addr,
			       host->base + MMCI_STM32_IDMABASE0R);
		writel_relaxed(MMCI_STM32_IDMAEN,
			       host->base + MMCI_STM32_IDMACTRLR);
		return 0;
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
			| MMCI_STM32_ABR;
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}

	/* notice the end of link list */
	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;

	dma_wmb();
	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
		       host->base + MMCI_STM32_IDMACTRLR);

	return 0;
}

static void sdmmc_idma_error(struct mmci_host *host)
{
	struct mmc_data *data = host->data;
	struct sdmmc_idma *idma = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
	host->dma_in_progress = false;
	data->host_cookie = 0;

	if (!idma->use_bounce_buffer)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!dma_inprogress(host))
		return;

	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
	host->dma_in_progress = false;

	if (!data->host_cookie)
		sdmmc_idma_unprep_data(host, data, 0);
}

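/*
 * Program the clock register: divider (or bypass), DDR mode, bus width,
 * hardware flow control and the bus speed mode bit for UHS timings.
 */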
static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	unsigned int clk = 0, ddr = 0;

	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		ddr = MCI_STM32_CLK_DDR;

	/*
	 * cclk = mclk / (2 * clkdiv)
	 * clkdiv 0 => bypass
	 * in ddr mode bypass is not possible
	 */
	if (desired) {
		if (desired >= host->mclk && !ddr) {
			host->cclk = host->mclk;
		} else {
			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
				clk = MCI_STM32_CLK_CLKDIV_MSK;
			host->cclk = host->mclk / (2 * clk);
		}
	} else {
		/*
		 * During the power-on phase the clock cannot be set to 0:
		 * only power-off and power-cycle deactivate the clock.
		 * If the desired clock is 0, program the maximum divider.
		 */
		clk = MCI_STM32_CLK_CLKDIV_MSK;
		host->cclk = host->mclk / (2 * clk);
	}

	/* Set actual clock for debug */
	if (host->mmc->ios.power_mode == MMC_POWER_ON)
		host->mmc->actual_clock = host->cclk;
	else
		host->mmc->actual_clock = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_STM32_CLK_WIDEBUS_4;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_STM32_CLK_WIDEBUS_8;

	clk |= MCI_STM32_CLK_HWFCEN;
	clk |= host->clk_reg_add;

	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
		clk |= MCI_STM32_CLK_BUSSPEED;

	mmci_write_clkreg(host, clk);
}

static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
	if (!dlyb || !dlyb->base)
		return;

	/* Output clock = Input clock */
	writel_relaxed(0, dlyb->base + DLYB_CR);
}

static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
	struct mmc_ios ios = host->mmc->ios;
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	/* adds OF options */
	pwr = host->pwr_reg_add;

	if (dlyb && dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	if (ios.power_mode == MMC_POWER_OFF) {
		/* Only a reset could power-off sdmmc */
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);

		/*
		 * Set the SDMMC in Power-cycle state.
		 * This drives the SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK lines
		 * low, to prevent the card from being supplied through the
		 * signal lines.
		 */
		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
	} else if (ios.power_mode == MMC_POWER_ON) {
		/*
		 * After power-off (reset) the irq mask defined in probe is
		 * lost: the default irq mask must be restored.
		 */
		writel(MCI_IRQENABLE | host->variant->start_err,
		       host->base + MMCIMASK0);

		/* preserves voltage switch bits */
		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
					MCI_STM32_VSWITCH);

		/*
		 * After a power-cycle state, we must set the SDMMC in
		 * Power-off. The SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
		 * driven high. Then we can set the SDMMC to Power-on state
		 */
		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
		mdelay(1);
		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
	}
}

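/*
 * Build the DCTRL value: block size, FIFO threshold for SDR104/HS200 on
 * hw revision >= 3, and the transfer mode (SDIO mode for single-block SDIO
 * transfers, block mode with automatic stop when a stop command is queued
 * without SBC, plain block mode otherwise).
 */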
static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->hw_revision >= 3) {
		u32 thr = 0;

		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
		    host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
			thr = ffs(min_t(unsigned int, host->data->blksz,
					host->variant->fifosize));
			thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
		}

		writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

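/*
 * Busy handling: the command is complete on an error or on BUSYD0END.
 * When the card still holds D0 low after the response, enable the
 * busy-detect interrupt and report "not complete" until it fires.
 */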
static bool sdmmc_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
				u32 status, u32 err_msk)
{
	void __iomem *base = host->base;
	u32 busy_d0, busy_d0end, mask, sdmmc_status;

	mask = readl_relaxed(base + MMCIMASK0);
	sdmmc_status = readl_relaxed(base + MMCISTATUS);
	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;

	/* complete if there is an error or busy_d0end */
	if ((status & err_msk) || busy_d0end)
		goto complete;

	/*
	 * On response the busy signaling is reflected in the BUSYD0 flag.
	 * If busy_d0 is in progress we must enable the busyd0end interrupt
	 * and wait for its completion. Otherwise this request has no busy
	 * step.
	 */
	if (busy_d0) {
		if (!host->busy_status) {
			writel_relaxed(mask | host->variant->busy_detect_mask,
				       base + MMCIMASK0);
			host->busy_status = status &
				(MCI_CMDSENT | MCI_CMDRESPEND);
		}
		return false;
	}

complete:
	if (host->busy_status) {
		writel_relaxed(mask & ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
		host->busy_status = 0;
	}

	writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);

	return true;
}

static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
{
	writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb,
				   int unit, int phase, bool sampler)
{
	u32 cfgr;

	writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);

	cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
	       FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
	writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);

	if (!sampler)
		writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

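/*
 * Scan the MP15 delay block UNIT values until the delay line length (LNG)
 * covers a usable range, then derive the highest usable phase (dlyb->max)
 * from the length field.
 */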
static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 cfgr;
	int i, lng, ret;

	for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
		dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);

		ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
						 (cfgr & DLYB_CFGR_LNGF),
						 1, DLYB_LNG_TIMEOUT_US);
		if (ret) {
			dev_warn(mmc_dev(host->mmc),
				 "delay line cfg timeout unit:%d cfgr:%d\n",
				 i, cfgr);
			continue;
		}

		lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
		if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
			break;
	}

	if (i > DLYB_CFGR_UNIT_MAX)
		return -EINVAL;

	dlyb->unit = i;
	dlyb->max = __fls(lng);

	return 0;
}

static int sdmmc_dlyb_mp25_enable(struct sdmmc_dlyb *dlyb)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr |= DLYBSD_CR_EN;

	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_LOCK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_set_cfg(struct sdmmc_dlyb *dlyb,
				   int unit __maybe_unused, int phase,
				   bool sampler __maybe_unused)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr &= ~DLYBSD_CR_RXTAPSEL_MASK;
	cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);

	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_RXTAPSEL_ACK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	dlyb->max = DLYBSD_TAPSEL_NB;

	return 0;
}

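/*
 * Try each phase/tap, issue the tuning command and track the longest run of
 * passing phases; the final setting is the middle of that window.
 */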
static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	int cur_len = 0, max_len = 0, end_of_len = 0;
	int phase, ret;

	for (phase = 0; phase <= dlyb->max; phase++) {
		ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
		if (ret) {
			dev_err(mmc_dev(host->mmc), "tuning config failed\n");
			return ret;
		}

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			cur_len = 0;
		} else {
			cur_len++;
			if (cur_len > max_len) {
				max_len = cur_len;
				end_of_len = phase;
			}
		}
	}

	if (!max_len) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EINVAL;
	}

	if (dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	phase = end_of_len - max_len / 2;
	ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
		return ret;
	}

	dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
		dlyb->unit, dlyb->max, phase);

	return 0;
}

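/*
 * Tuning entry point: only SDR104/HS200 above 50 MHz need it. Enable the
 * delay block, switch the SDMMC to the feedback clock (SDMMC_FBCK) and run
 * the phase scan.
 */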
static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 clk;
	int ret;

	if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
	     host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
	    host->mmc->actual_clock <= 50000000)
		return 0;

	if (!dlyb || !dlyb->base)
		return -EINVAL;

	ret = dlyb->ops->dlyb_enable(dlyb);
	if (ret)
		return ret;

	/*
	 * SDMMC_FBCK is selected when an external Delay Block is needed
	 * with SDR104 or HS200.
	 */
	clk = host->clk_reg;
	clk &= ~MCI_STM32_CLK_SEL_MSK;
	clk |= MCI_STM32_CLK_SELFBCK;
	mmci_write_clkreg(host, clk);

	ret = dlyb->ops->tuning_prepare(host);
	if (ret)
		return ret;

	return sdmmc_dlyb_phase_tuning(host, opcode);
}

static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
{
	/* clear the voltage switch completion flag */
	writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
	/* enable Voltage switch procedure */
	mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
}

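/*
 * Complete the 1.8V signal voltage switch: raise VSWITCH, wait up to 10 ms
 * for VSWEND, then clear the voltage switch enable bits.
 */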
static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
				      struct mmc_ios *ios)
{
	unsigned long flags;
	u32 status;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
	    host->pwr_reg & MCI_STM32_VSWITCHEN) {
		mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
		spin_unlock_irqrestore(&host->lock, flags);

		/* wait for the voltage switch to complete (10 ms max) */
		ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
						 status,
						 (status & MCI_STM32_VSWEND),
						 10, SDMMC_VSWEND_TIMEOUT_US);

		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
			       host->base + MMCICLEAR);
		spin_lock_irqsave(&host->lock, flags);
		mmci_write_pwrreg(host, host->pwr_reg &
				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}

static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,
	.dma_error = sdmmc_idma_error,
	.set_clkreg = mmci_sdmmc_set_clkreg,
	.set_pwrreg = mmci_sdmmc_set_pwrreg,
	.busy_complete = sdmmc_busy_complete,
	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
	.dlyb_enable = sdmmc_dlyb_mp15_enable,
	.set_input_ck = sdmmc_dlyb_mp15_input_ck,
	.tuning_prepare = sdmmc_dlyb_mp15_prepare,
	.set_cfg = sdmmc_dlyb_mp15_set_cfg,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp25_ops = {
	.dlyb_enable = sdmmc_dlyb_mp25_enable,
	.tuning_prepare = sdmmc_dlyb_mp25_prepare,
	.set_cfg = sdmmc_dlyb_mp25_set_cfg,
};

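/*
 * Variant init: install the SDMMC-specific host ops and, when a delay block
 * is described as the second register region in DT, register the matching
 * tuning ops and the execute_tuning callback.
 */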
void sdmmc_variant_init(struct mmci_host *host)
{
	struct device_node *np = host->mmc->parent->of_node;
	void __iomem *base_dlyb;
	struct sdmmc_dlyb *dlyb;

	host->ops = &sdmmc_variant_ops;
	host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);

	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
	if (IS_ERR(base_dlyb))
		return;

	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
	if (!dlyb)
		return;

	dlyb->base = base_dlyb;
	if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
		dlyb->ops = &dlyb_tuning_mp25_ops;
	else
		dlyb->ops = &dlyb_tuning_mp15_ops;

	host->variant_priv = dlyb;
	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}