// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
 *
 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
 *  Unlike the hardware found on MX1, this hardware just works and does
 *  not need all the quirks found in imxmmc.c, hence the separate driver.
 *
 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
 *
 *  derived from pxamci.c by Russell King
 */
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/ioport.h>
19 #include <linux/platform_device.h>
20 #include <linux/highmem.h>
21 #include <linux/interrupt.h>
22 #include <linux/irq.h>
23 #include <linux/blkdev.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/mmc/host.h>
26 #include <linux/mmc/card.h>
27 #include <linux/delay.h>
28 #include <linux/clk.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/dmaengine.h>
32 #include <linux/types.h>
34 #include <linux/of_device.h>
35 #include <linux/of_dma.h>
36 #include <linux/mmc/slot-gpio.h>
40 #include <linux/platform_data/mmc-mxcmmc.h>
42 #include <linux/platform_data/dma-imx.h>
#define DRIVER_NAME "mxc-mmc"
/* Watchdog timeout for a stalled DMA transfer */
#define MXCMCI_TIMEOUT_MS 10000

/* SDHC register offsets */
#define MMC_REG_STR_STP_CLK		0x00
#define MMC_REG_STATUS			0x04
#define MMC_REG_CLK_RATE		0x08
#define MMC_REG_CMD_DAT_CONT		0x0C
#define MMC_REG_RES_TO			0x10
#define MMC_REG_READ_TO			0x14
#define MMC_REG_BLK_LEN			0x18
#define MMC_REG_NOB			0x1C
#define MMC_REG_REV_NO			0x20
#define MMC_REG_INT_CNTR		0x24
#define MMC_REG_CMD			0x28
#define MMC_REG_ARG			0x2C
#define MMC_REG_RES_FIFO		0x34
#define MMC_REG_BUFFER_ACCESS		0x38

/* MMC_REG_STR_STP_CLK bits */
#define STR_STP_CLK_RESET		(1 << 3)
#define STR_STP_CLK_START_CLK		(1 << 1)
#define STR_STP_CLK_STOP_CLK		(1 << 0)

/* MMC_REG_STATUS bits */
#define STATUS_CARD_INSERTION		(1 << 31)
#define STATUS_CARD_REMOVAL		(1 << 30)
#define STATUS_YBUF_EMPTY		(1 << 29)
#define STATUS_XBUF_EMPTY		(1 << 28)
#define STATUS_YBUF_FULL		(1 << 27)
#define STATUS_XBUF_FULL		(1 << 26)
#define STATUS_BUF_UND_RUN		(1 << 25)
#define STATUS_BUF_OVFL			(1 << 24)
#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
#define STATUS_END_CMD_RESP		(1 << 13)
#define STATUS_WRITE_OP_DONE		(1 << 12)
#define STATUS_DATA_TRANS_DONE		(1 << 11)
#define STATUS_READ_OP_DONE		(1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
#define STATUS_BUF_READ_RDY		(1 << 7)
#define STATUS_BUF_WRITE_RDY		(1 << 6)
#define STATUS_RESP_CRC_ERR		(1 << 5)
#define STATUS_CRC_READ_ERR		(1 << 3)
#define STATUS_CRC_WRITE_ERR		(1 << 2)
#define STATUS_TIME_OUT_RESP		(1 << 1)
#define STATUS_TIME_OUT_READ		(1 << 0)
#define STATUS_ERR_MASK			0x2f

/* MMC_REG_CMD_DAT_CONT bits */
#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
#define CMD_DAT_CONT_INIT		(1 << 7)
#define CMD_DAT_CONT_WRITE		(1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)

/* MMC_REG_INT_CNTR bits */
#define INT_SDIO_INT_WKP_EN		(1 << 18)
#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
#define INT_CARD_INSERTION_EN		(1 << 15)
#define INT_CARD_REMOVAL_EN		(1 << 14)
#define INT_SDIO_IRQ_EN			(1 << 13)
#define INT_DAT0_EN			(1 << 12)
#define INT_BUF_READ_EN			(1 << 4)
#define INT_BUF_WRITE_EN		(1 << 3)
#define INT_END_CMD_RES_EN		(1 << 2)
#define INT_WRITE_OP_DONE_EN		(1 << 1)
#define INT_READ_OP_EN			(1 << 0)
121 struct mmc_host
*mmc
;
123 dma_addr_t phys_base
;
125 struct dma_chan
*dma
;
126 struct dma_async_tx_descriptor
*desc
;
128 int default_irq_mask
;
130 unsigned int power_mode
;
131 struct imxmmc_platform_data
*pdata
;
133 struct mmc_request
*req
;
134 struct mmc_command
*cmd
;
135 struct mmc_data
*data
;
137 unsigned int datasize
;
138 unsigned int dma_dir
;
148 struct work_struct datawork
;
153 struct dma_slave_config dma_slave_config
;
154 struct imx_dma_data dma_data
;
156 struct timer_list watchdog
;
157 enum mxcmci_type devtype
;
160 static const struct platform_device_id mxcmci_devtype
[] = {
163 .driver_data
= IMX21_MMC
,
166 .driver_data
= IMX31_MMC
,
168 .name
= "mpc512x-sdhc",
169 .driver_data
= MPC512X_MMC
,
174 MODULE_DEVICE_TABLE(platform
, mxcmci_devtype
);
176 static const struct of_device_id mxcmci_of_match
[] = {
178 .compatible
= "fsl,imx21-mmc",
179 .data
= &mxcmci_devtype
[IMX21_MMC
],
181 .compatible
= "fsl,imx31-mmc",
182 .data
= &mxcmci_devtype
[IMX31_MMC
],
184 .compatible
= "fsl,mpc5121-sdhc",
185 .data
= &mxcmci_devtype
[MPC512X_MMC
],
190 MODULE_DEVICE_TABLE(of
, mxcmci_of_match
);
192 static inline int is_imx31_mmc(struct mxcmci_host
*host
)
194 return host
->devtype
== IMX31_MMC
;
197 static inline int is_mpc512x_mmc(struct mxcmci_host
*host
)
199 return host
->devtype
== MPC512X_MMC
;
202 static inline u32
mxcmci_readl(struct mxcmci_host
*host
, int reg
)
204 if (IS_ENABLED(CONFIG_PPC_MPC512x
))
205 return ioread32be(host
->base
+ reg
);
207 return readl(host
->base
+ reg
);
210 static inline void mxcmci_writel(struct mxcmci_host
*host
, u32 val
, int reg
)
212 if (IS_ENABLED(CONFIG_PPC_MPC512x
))
213 iowrite32be(val
, host
->base
+ reg
);
215 writel(val
, host
->base
+ reg
);
218 static inline u16
mxcmci_readw(struct mxcmci_host
*host
, int reg
)
220 if (IS_ENABLED(CONFIG_PPC_MPC512x
))
221 return ioread32be(host
->base
+ reg
);
223 return readw(host
->base
+ reg
);
226 static inline void mxcmci_writew(struct mxcmci_host
*host
, u16 val
, int reg
)
228 if (IS_ENABLED(CONFIG_PPC_MPC512x
))
229 iowrite32be(val
, host
->base
+ reg
);
231 writew(val
, host
->base
+ reg
);
/* Forward declaration: needed by mxcmci_poll_status() before definition. */
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
236 static void mxcmci_set_power(struct mxcmci_host
*host
, unsigned int vdd
)
238 if (!IS_ERR(host
->mmc
->supply
.vmmc
)) {
239 if (host
->power_mode
== MMC_POWER_UP
)
240 mmc_regulator_set_ocr(host
->mmc
,
241 host
->mmc
->supply
.vmmc
, vdd
);
242 else if (host
->power_mode
== MMC_POWER_OFF
)
243 mmc_regulator_set_ocr(host
->mmc
,
244 host
->mmc
->supply
.vmmc
, 0);
247 if (host
->pdata
&& host
->pdata
->setpower
)
248 host
->pdata
->setpower(mmc_dev(host
->mmc
), vdd
);
251 static inline int mxcmci_use_dma(struct mxcmci_host
*host
)
256 static void mxcmci_softreset(struct mxcmci_host
*host
)
260 dev_dbg(mmc_dev(host
->mmc
), "mxcmci_softreset\n");
263 mxcmci_writew(host
, STR_STP_CLK_RESET
, MMC_REG_STR_STP_CLK
);
264 mxcmci_writew(host
, STR_STP_CLK_RESET
| STR_STP_CLK_START_CLK
,
265 MMC_REG_STR_STP_CLK
);
267 for (i
= 0; i
< 8; i
++)
268 mxcmci_writew(host
, STR_STP_CLK_START_CLK
, MMC_REG_STR_STP_CLK
);
270 mxcmci_writew(host
, 0xff, MMC_REG_RES_TO
);
273 #if IS_ENABLED(CONFIG_PPC_MPC512x)
274 static inline void buffer_swap32(u32
*buf
, int len
)
278 for (i
= 0; i
< ((len
+ 3) / 4); i
++) {
284 static void mxcmci_swap_buffers(struct mmc_data
*data
)
286 struct scatterlist
*sg
;
289 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
)
290 buffer_swap32(sg_virt(sg
), sg
->length
);
293 static inline void mxcmci_swap_buffers(struct mmc_data
*data
) {}
296 static int mxcmci_setup_data(struct mxcmci_host
*host
, struct mmc_data
*data
)
298 unsigned int nob
= data
->blocks
;
299 unsigned int blksz
= data
->blksz
;
300 unsigned int datasize
= nob
* blksz
;
301 struct scatterlist
*sg
;
302 enum dma_transfer_direction slave_dirn
;
306 data
->bytes_xfered
= 0;
308 mxcmci_writew(host
, nob
, MMC_REG_NOB
);
309 mxcmci_writew(host
, blksz
, MMC_REG_BLK_LEN
);
310 host
->datasize
= datasize
;
312 if (!mxcmci_use_dma(host
))
315 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
316 if (sg
->offset
& 3 || sg
->length
& 3 || sg
->length
< 512) {
322 if (data
->flags
& MMC_DATA_READ
) {
323 host
->dma_dir
= DMA_FROM_DEVICE
;
324 slave_dirn
= DMA_DEV_TO_MEM
;
326 host
->dma_dir
= DMA_TO_DEVICE
;
327 slave_dirn
= DMA_MEM_TO_DEV
;
329 mxcmci_swap_buffers(data
);
332 nents
= dma_map_sg(host
->dma
->device
->dev
, data
->sg
,
333 data
->sg_len
, host
->dma_dir
);
334 if (nents
!= data
->sg_len
)
337 host
->desc
= dmaengine_prep_slave_sg(host
->dma
,
338 data
->sg
, data
->sg_len
, slave_dirn
,
339 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
342 dma_unmap_sg(host
->dma
->device
->dev
, data
->sg
, data
->sg_len
,
345 return 0; /* Fall back to PIO */
349 dmaengine_submit(host
->desc
);
350 dma_async_issue_pending(host
->dma
);
352 mod_timer(&host
->watchdog
, jiffies
+ msecs_to_jiffies(MXCMCI_TIMEOUT_MS
));
/* Forward declarations: completion handlers used by the DMA callback. */
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
360 static void mxcmci_dma_callback(void *data
)
362 struct mxcmci_host
*host
= data
;
365 del_timer(&host
->watchdog
);
367 stat
= mxcmci_readl(host
, MMC_REG_STATUS
);
369 dev_dbg(mmc_dev(host
->mmc
), "%s: 0x%08x\n", __func__
, stat
);
371 mxcmci_data_done(host
, stat
);
374 static int mxcmci_start_cmd(struct mxcmci_host
*host
, struct mmc_command
*cmd
,
377 u32 int_cntr
= host
->default_irq_mask
;
380 WARN_ON(host
->cmd
!= NULL
);
383 switch (mmc_resp_type(cmd
)) {
384 case MMC_RSP_R1
: /* short CRC, OPCODE */
385 case MMC_RSP_R1B
:/* short CRC, OPCODE, BUSY */
386 cmdat
|= CMD_DAT_CONT_RESPONSE_48BIT_CRC
;
388 case MMC_RSP_R2
: /* long 136 bit + CRC */
389 cmdat
|= CMD_DAT_CONT_RESPONSE_136BIT
;
391 case MMC_RSP_R3
: /* short */
392 cmdat
|= CMD_DAT_CONT_RESPONSE_48BIT
;
397 dev_err(mmc_dev(host
->mmc
), "unhandled response type 0x%x\n",
399 cmd
->error
= -EINVAL
;
403 int_cntr
= INT_END_CMD_RES_EN
;
405 if (mxcmci_use_dma(host
)) {
406 if (host
->dma_dir
== DMA_FROM_DEVICE
) {
407 host
->desc
->callback
= mxcmci_dma_callback
;
408 host
->desc
->callback_param
= host
;
410 int_cntr
|= INT_WRITE_OP_DONE_EN
;
414 spin_lock_irqsave(&host
->lock
, flags
);
416 int_cntr
|= INT_SDIO_IRQ_EN
;
417 mxcmci_writel(host
, int_cntr
, MMC_REG_INT_CNTR
);
418 spin_unlock_irqrestore(&host
->lock
, flags
);
420 mxcmci_writew(host
, cmd
->opcode
, MMC_REG_CMD
);
421 mxcmci_writel(host
, cmd
->arg
, MMC_REG_ARG
);
422 mxcmci_writew(host
, cmdat
, MMC_REG_CMD_DAT_CONT
);
427 static void mxcmci_finish_request(struct mxcmci_host
*host
,
428 struct mmc_request
*req
)
430 u32 int_cntr
= host
->default_irq_mask
;
433 spin_lock_irqsave(&host
->lock
, flags
);
435 int_cntr
|= INT_SDIO_IRQ_EN
;
436 mxcmci_writel(host
, int_cntr
, MMC_REG_INT_CNTR
);
437 spin_unlock_irqrestore(&host
->lock
, flags
);
443 mmc_request_done(host
->mmc
, req
);
446 static int mxcmci_finish_data(struct mxcmci_host
*host
, unsigned int stat
)
448 struct mmc_data
*data
= host
->data
;
451 if (mxcmci_use_dma(host
)) {
452 dma_unmap_sg(host
->dma
->device
->dev
, data
->sg
, data
->sg_len
,
454 mxcmci_swap_buffers(data
);
457 if (stat
& STATUS_ERR_MASK
) {
458 dev_dbg(mmc_dev(host
->mmc
), "request failed. status: 0x%08x\n",
460 if (stat
& STATUS_CRC_READ_ERR
) {
461 dev_err(mmc_dev(host
->mmc
), "%s: -EILSEQ\n", __func__
);
462 data
->error
= -EILSEQ
;
463 } else if (stat
& STATUS_CRC_WRITE_ERR
) {
464 u32 err_code
= (stat
>> 9) & 0x3;
465 if (err_code
== 2) { /* No CRC response */
466 dev_err(mmc_dev(host
->mmc
),
467 "%s: No CRC -ETIMEDOUT\n", __func__
);
468 data
->error
= -ETIMEDOUT
;
470 dev_err(mmc_dev(host
->mmc
),
471 "%s: -EILSEQ\n", __func__
);
472 data
->error
= -EILSEQ
;
474 } else if (stat
& STATUS_TIME_OUT_READ
) {
475 dev_err(mmc_dev(host
->mmc
),
476 "%s: read -ETIMEDOUT\n", __func__
);
477 data
->error
= -ETIMEDOUT
;
479 dev_err(mmc_dev(host
->mmc
), "%s: -EIO\n", __func__
);
483 data
->bytes_xfered
= host
->datasize
;
486 data_error
= data
->error
;
493 static void mxcmci_read_response(struct mxcmci_host
*host
, unsigned int stat
)
495 struct mmc_command
*cmd
= host
->cmd
;
502 if (stat
& STATUS_TIME_OUT_RESP
) {
503 dev_dbg(mmc_dev(host
->mmc
), "CMD TIMEOUT\n");
504 cmd
->error
= -ETIMEDOUT
;
505 } else if (stat
& STATUS_RESP_CRC_ERR
&& cmd
->flags
& MMC_RSP_CRC
) {
506 dev_dbg(mmc_dev(host
->mmc
), "cmd crc error\n");
507 cmd
->error
= -EILSEQ
;
510 if (cmd
->flags
& MMC_RSP_PRESENT
) {
511 if (cmd
->flags
& MMC_RSP_136
) {
512 for (i
= 0; i
< 4; i
++) {
513 a
= mxcmci_readw(host
, MMC_REG_RES_FIFO
);
514 b
= mxcmci_readw(host
, MMC_REG_RES_FIFO
);
515 cmd
->resp
[i
] = a
<< 16 | b
;
518 a
= mxcmci_readw(host
, MMC_REG_RES_FIFO
);
519 b
= mxcmci_readw(host
, MMC_REG_RES_FIFO
);
520 c
= mxcmci_readw(host
, MMC_REG_RES_FIFO
);
521 cmd
->resp
[0] = a
<< 24 | b
<< 8 | c
>> 8;
526 static int mxcmci_poll_status(struct mxcmci_host
*host
, u32 mask
)
529 unsigned long timeout
= jiffies
+ HZ
;
532 stat
= mxcmci_readl(host
, MMC_REG_STATUS
);
533 if (stat
& STATUS_ERR_MASK
)
535 if (time_after(jiffies
, timeout
)) {
536 mxcmci_softreset(host
);
537 mxcmci_set_clk_rate(host
, host
->clock
);
538 return STATUS_TIME_OUT_READ
;
546 static int mxcmci_pull(struct mxcmci_host
*host
, void *_buf
, int bytes
)
552 stat
= mxcmci_poll_status(host
,
553 STATUS_BUF_READ_RDY
| STATUS_READ_OP_DONE
);
556 *buf
++ = cpu_to_le32(mxcmci_readl(host
, MMC_REG_BUFFER_ACCESS
));
564 stat
= mxcmci_poll_status(host
,
565 STATUS_BUF_READ_RDY
| STATUS_READ_OP_DONE
);
568 tmp
= cpu_to_le32(mxcmci_readl(host
, MMC_REG_BUFFER_ACCESS
));
569 memcpy(b
, &tmp
, bytes
);
575 static int mxcmci_push(struct mxcmci_host
*host
, void *_buf
, int bytes
)
581 stat
= mxcmci_poll_status(host
, STATUS_BUF_WRITE_RDY
);
584 mxcmci_writel(host
, cpu_to_le32(*buf
++), MMC_REG_BUFFER_ACCESS
);
592 stat
= mxcmci_poll_status(host
, STATUS_BUF_WRITE_RDY
);
596 memcpy(&tmp
, b
, bytes
);
597 mxcmci_writel(host
, cpu_to_le32(tmp
), MMC_REG_BUFFER_ACCESS
);
600 return mxcmci_poll_status(host
, STATUS_BUF_WRITE_RDY
);
603 static int mxcmci_transfer_data(struct mxcmci_host
*host
)
605 struct mmc_data
*data
= host
->req
->data
;
606 struct scatterlist
*sg
;
612 if (data
->flags
& MMC_DATA_READ
) {
613 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
614 stat
= mxcmci_pull(host
, sg_virt(sg
), sg
->length
);
617 host
->datasize
+= sg
->length
;
620 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
621 stat
= mxcmci_push(host
, sg_virt(sg
), sg
->length
);
624 host
->datasize
+= sg
->length
;
626 stat
= mxcmci_poll_status(host
, STATUS_WRITE_OP_DONE
);
633 static void mxcmci_datawork(struct work_struct
*work
)
635 struct mxcmci_host
*host
= container_of(work
, struct mxcmci_host
,
637 int datastat
= mxcmci_transfer_data(host
);
639 mxcmci_writel(host
, STATUS_READ_OP_DONE
| STATUS_WRITE_OP_DONE
,
641 mxcmci_finish_data(host
, datastat
);
643 if (host
->req
->stop
) {
644 if (mxcmci_start_cmd(host
, host
->req
->stop
, 0)) {
645 mxcmci_finish_request(host
, host
->req
);
649 mxcmci_finish_request(host
, host
->req
);
653 static void mxcmci_data_done(struct mxcmci_host
*host
, unsigned int stat
)
655 struct mmc_request
*req
;
659 spin_lock_irqsave(&host
->lock
, flags
);
662 spin_unlock_irqrestore(&host
->lock
, flags
);
667 spin_unlock_irqrestore(&host
->lock
, flags
);
673 host
->req
= NULL
; /* we will handle finish req below */
675 data_error
= mxcmci_finish_data(host
, stat
);
677 spin_unlock_irqrestore(&host
->lock
, flags
);
682 mxcmci_read_response(host
, stat
);
686 if (mxcmci_start_cmd(host
, req
->stop
, 0)) {
687 mxcmci_finish_request(host
, req
);
691 mxcmci_finish_request(host
, req
);
695 static void mxcmci_cmd_done(struct mxcmci_host
*host
, unsigned int stat
)
697 mxcmci_read_response(host
, stat
);
700 if (!host
->data
&& host
->req
) {
701 mxcmci_finish_request(host
, host
->req
);
705 /* For the DMA case the DMA engine handles the data transfer
706 * automatically. For non DMA we have to do it ourselves.
707 * Don't do it in interrupt context though.
709 if (!mxcmci_use_dma(host
) && host
->data
)
710 schedule_work(&host
->datawork
);
714 static irqreturn_t
mxcmci_irq(int irq
, void *devid
)
716 struct mxcmci_host
*host
= devid
;
720 stat
= mxcmci_readl(host
, MMC_REG_STATUS
);
722 stat
& ~(STATUS_SDIO_INT_ACTIVE
| STATUS_DATA_TRANS_DONE
|
723 STATUS_WRITE_OP_DONE
),
726 dev_dbg(mmc_dev(host
->mmc
), "%s: 0x%08x\n", __func__
, stat
);
728 spin_lock(&host
->lock
);
729 sdio_irq
= (stat
& STATUS_SDIO_INT_ACTIVE
) && host
->use_sdio
;
730 spin_unlock(&host
->lock
);
732 if (mxcmci_use_dma(host
) && (stat
& (STATUS_WRITE_OP_DONE
)))
733 mxcmci_writel(host
, STATUS_WRITE_OP_DONE
, MMC_REG_STATUS
);
736 mxcmci_writel(host
, STATUS_SDIO_INT_ACTIVE
, MMC_REG_STATUS
);
737 mmc_signal_sdio_irq(host
->mmc
);
740 if (stat
& STATUS_END_CMD_RESP
)
741 mxcmci_cmd_done(host
, stat
);
743 if (mxcmci_use_dma(host
) && (stat
& STATUS_WRITE_OP_DONE
)) {
744 del_timer(&host
->watchdog
);
745 mxcmci_data_done(host
, stat
);
748 if (host
->default_irq_mask
&&
749 (stat
& (STATUS_CARD_INSERTION
| STATUS_CARD_REMOVAL
)))
750 mmc_detect_change(host
->mmc
, msecs_to_jiffies(200));
755 static void mxcmci_request(struct mmc_host
*mmc
, struct mmc_request
*req
)
757 struct mxcmci_host
*host
= mmc_priv(mmc
);
758 unsigned int cmdat
= host
->cmdat
;
761 WARN_ON(host
->req
!= NULL
);
764 host
->cmdat
&= ~CMD_DAT_CONT_INIT
;
770 error
= mxcmci_setup_data(host
, req
->data
);
772 req
->cmd
->error
= error
;
777 cmdat
|= CMD_DAT_CONT_DATA_ENABLE
;
779 if (req
->data
->flags
& MMC_DATA_WRITE
)
780 cmdat
|= CMD_DAT_CONT_WRITE
;
783 error
= mxcmci_start_cmd(host
, req
->cmd
, cmdat
);
787 mxcmci_finish_request(host
, req
);
790 static void mxcmci_set_clk_rate(struct mxcmci_host
*host
, unsigned int clk_ios
)
792 unsigned int divider
;
794 unsigned int clk_in
= clk_get_rate(host
->clk_per
);
796 while (prescaler
<= 0x800) {
797 for (divider
= 1; divider
<= 0xF; divider
++) {
800 x
= (clk_in
/ (divider
+ 1));
803 x
/= (prescaler
* 2);
817 mxcmci_writew(host
, (prescaler
<< 4) | divider
, MMC_REG_CLK_RATE
);
819 dev_dbg(mmc_dev(host
->mmc
), "scaler: %d divider: %d in: %d out: %d\n",
820 prescaler
, divider
, clk_in
, clk_ios
);
823 static int mxcmci_setup_dma(struct mmc_host
*mmc
)
825 struct mxcmci_host
*host
= mmc_priv(mmc
);
826 struct dma_slave_config
*config
= &host
->dma_slave_config
;
828 config
->dst_addr
= host
->phys_base
+ MMC_REG_BUFFER_ACCESS
;
829 config
->src_addr
= host
->phys_base
+ MMC_REG_BUFFER_ACCESS
;
830 config
->dst_addr_width
= 4;
831 config
->src_addr_width
= 4;
832 config
->dst_maxburst
= host
->burstlen
;
833 config
->src_maxburst
= host
->burstlen
;
834 config
->device_fc
= false;
836 return dmaengine_slave_config(host
->dma
, config
);
839 static void mxcmci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
841 struct mxcmci_host
*host
= mmc_priv(mmc
);
845 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0)
846 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
848 if (ios
->bus_width
== MMC_BUS_WIDTH_4
)
853 if (mxcmci_use_dma(host
) && burstlen
!= host
->burstlen
) {
854 host
->burstlen
= burstlen
;
855 ret
= mxcmci_setup_dma(mmc
);
857 dev_err(mmc_dev(host
->mmc
),
858 "failed to config DMA channel. Falling back to PIO\n");
859 dma_release_channel(host
->dma
);
865 if (ios
->bus_width
== MMC_BUS_WIDTH_4
)
866 host
->cmdat
|= CMD_DAT_CONT_BUS_WIDTH_4
;
868 host
->cmdat
&= ~CMD_DAT_CONT_BUS_WIDTH_4
;
870 if (host
->power_mode
!= ios
->power_mode
) {
871 host
->power_mode
= ios
->power_mode
;
872 mxcmci_set_power(host
, ios
->vdd
);
874 if (ios
->power_mode
== MMC_POWER_ON
)
875 host
->cmdat
|= CMD_DAT_CONT_INIT
;
879 mxcmci_set_clk_rate(host
, ios
->clock
);
880 mxcmci_writew(host
, STR_STP_CLK_START_CLK
, MMC_REG_STR_STP_CLK
);
882 mxcmci_writew(host
, STR_STP_CLK_STOP_CLK
, MMC_REG_STR_STP_CLK
);
885 host
->clock
= ios
->clock
;
888 static irqreturn_t
mxcmci_detect_irq(int irq
, void *data
)
890 struct mmc_host
*mmc
= data
;
892 dev_dbg(mmc_dev(mmc
), "%s\n", __func__
);
894 mmc_detect_change(mmc
, msecs_to_jiffies(250));
898 static int mxcmci_get_ro(struct mmc_host
*mmc
)
900 struct mxcmci_host
*host
= mmc_priv(mmc
);
902 if (host
->pdata
&& host
->pdata
->get_ro
)
903 return !!host
->pdata
->get_ro(mmc_dev(mmc
));
905 * If board doesn't support read only detection (no mmc_gpio
906 * context or gpio is invalid), then let the mmc core decide
909 return mmc_gpio_get_ro(mmc
);
912 static void mxcmci_enable_sdio_irq(struct mmc_host
*mmc
, int enable
)
914 struct mxcmci_host
*host
= mmc_priv(mmc
);
918 spin_lock_irqsave(&host
->lock
, flags
);
919 host
->use_sdio
= enable
;
920 int_cntr
= mxcmci_readl(host
, MMC_REG_INT_CNTR
);
923 int_cntr
|= INT_SDIO_IRQ_EN
;
925 int_cntr
&= ~INT_SDIO_IRQ_EN
;
927 mxcmci_writel(host
, int_cntr
, MMC_REG_INT_CNTR
);
928 spin_unlock_irqrestore(&host
->lock
, flags
);
931 static void mxcmci_init_card(struct mmc_host
*host
, struct mmc_card
*card
)
933 struct mxcmci_host
*mxcmci
= mmc_priv(host
);
936 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
937 * multi-block transfers when connected SDIO peripheral doesn't
938 * drive the BUSY line as required by the specs.
939 * One way to prevent this is to only allow 1-bit transfers.
942 if (is_imx31_mmc(mxcmci
) && card
->type
== MMC_TYPE_SDIO
)
943 host
->caps
&= ~MMC_CAP_4_BIT_DATA
;
945 host
->caps
|= MMC_CAP_4_BIT_DATA
;
948 static bool filter(struct dma_chan
*chan
, void *param
)
950 struct mxcmci_host
*host
= param
;
952 if (!imx_dma_is_general_purpose(chan
))
955 chan
->private = &host
->dma_data
;
960 static void mxcmci_watchdog(struct timer_list
*t
)
962 struct mxcmci_host
*host
= from_timer(host
, t
, watchdog
);
963 struct mmc_request
*req
= host
->req
;
964 unsigned int stat
= mxcmci_readl(host
, MMC_REG_STATUS
);
966 if (host
->dma_dir
== DMA_FROM_DEVICE
) {
967 dmaengine_terminate_all(host
->dma
);
968 dev_err(mmc_dev(host
->mmc
),
969 "%s: read time out (status = 0x%08x)\n",
972 dev_err(mmc_dev(host
->mmc
),
973 "%s: write time out (status = 0x%08x)\n",
975 mxcmci_softreset(host
);
978 /* Mark transfer as erroneus and inform the upper layers */
981 host
->data
->error
= -ETIMEDOUT
;
985 mmc_request_done(host
->mmc
, req
);
988 static const struct mmc_host_ops mxcmci_ops
= {
989 .request
= mxcmci_request
,
990 .set_ios
= mxcmci_set_ios
,
991 .get_ro
= mxcmci_get_ro
,
992 .enable_sdio_irq
= mxcmci_enable_sdio_irq
,
993 .init_card
= mxcmci_init_card
,
996 static int mxcmci_probe(struct platform_device
*pdev
)
998 struct mmc_host
*mmc
;
999 struct mxcmci_host
*host
;
1000 struct resource
*res
;
1002 bool dat3_card_detect
= false;
1003 dma_cap_mask_t mask
;
1004 const struct of_device_id
*of_id
;
1005 struct imxmmc_platform_data
*pdata
= pdev
->dev
.platform_data
;
1007 pr_info("i.MX/MPC512x SDHC driver\n");
1009 of_id
= of_match_device(mxcmci_of_match
, &pdev
->dev
);
1011 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1012 irq
= platform_get_irq(pdev
, 0);
1016 mmc
= mmc_alloc_host(sizeof(*host
), &pdev
->dev
);
1020 host
= mmc_priv(mmc
);
1022 host
->base
= devm_ioremap_resource(&pdev
->dev
, res
);
1023 if (IS_ERR(host
->base
)) {
1024 ret
= PTR_ERR(host
->base
);
1028 host
->phys_base
= res
->start
;
1030 ret
= mmc_of_parse(mmc
);
1033 mmc
->ops
= &mxcmci_ops
;
1035 /* For devicetree parsing, the bus width is read from devicetree */
1037 mmc
->caps
= MMC_CAP_4_BIT_DATA
| MMC_CAP_SDIO_IRQ
;
1039 mmc
->caps
|= MMC_CAP_SDIO_IRQ
;
1041 /* MMC core transfer sizes tunable parameters */
1042 mmc
->max_blk_size
= 2048;
1043 mmc
->max_blk_count
= 65535;
1044 mmc
->max_req_size
= mmc
->max_blk_size
* mmc
->max_blk_count
;
1045 mmc
->max_seg_size
= mmc
->max_req_size
;
1048 const struct platform_device_id
*id_entry
= of_id
->data
;
1049 host
->devtype
= id_entry
->driver_data
;
1051 host
->devtype
= pdev
->id_entry
->driver_data
;
1054 /* adjust max_segs after devtype detection */
1055 if (!is_mpc512x_mmc(host
))
1059 host
->pdata
= pdata
;
1060 spin_lock_init(&host
->lock
);
1063 dat3_card_detect
= pdata
->dat3_card_detect
;
1064 else if (mmc_card_is_removable(mmc
)
1065 && !of_property_read_bool(pdev
->dev
.of_node
, "cd-gpios"))
1066 dat3_card_detect
= true;
1068 ret
= mmc_regulator_get_supply(mmc
);
1072 if (!mmc
->ocr_avail
) {
1073 if (pdata
&& pdata
->ocr_avail
)
1074 mmc
->ocr_avail
= pdata
->ocr_avail
;
1076 mmc
->ocr_avail
= MMC_VDD_32_33
| MMC_VDD_33_34
;
1079 if (dat3_card_detect
)
1080 host
->default_irq_mask
=
1081 INT_CARD_INSERTION_EN
| INT_CARD_REMOVAL_EN
;
1083 host
->default_irq_mask
= 0;
1085 host
->clk_ipg
= devm_clk_get(&pdev
->dev
, "ipg");
1086 if (IS_ERR(host
->clk_ipg
)) {
1087 ret
= PTR_ERR(host
->clk_ipg
);
1091 host
->clk_per
= devm_clk_get(&pdev
->dev
, "per");
1092 if (IS_ERR(host
->clk_per
)) {
1093 ret
= PTR_ERR(host
->clk_per
);
1097 ret
= clk_prepare_enable(host
->clk_per
);
1101 ret
= clk_prepare_enable(host
->clk_ipg
);
1103 goto out_clk_per_put
;
1105 mxcmci_softreset(host
);
1107 host
->rev_no
= mxcmci_readw(host
, MMC_REG_REV_NO
);
1108 if (host
->rev_no
!= 0x400) {
1110 dev_err(mmc_dev(host
->mmc
), "wrong rev.no. 0x%08x. aborting.\n",
1115 mmc
->f_min
= clk_get_rate(host
->clk_per
) >> 16;
1116 mmc
->f_max
= clk_get_rate(host
->clk_per
) >> 1;
1118 /* recommended in data sheet */
1119 mxcmci_writew(host
, 0x2db4, MMC_REG_READ_TO
);
1121 mxcmci_writel(host
, host
->default_irq_mask
, MMC_REG_INT_CNTR
);
1124 host
->dma
= dma_request_chan(&pdev
->dev
, "rx-tx");
1125 if (IS_ERR(host
->dma
)) {
1126 if (PTR_ERR(host
->dma
) == -EPROBE_DEFER
) {
1127 ret
= -EPROBE_DEFER
;
1131 /* Ignore errors to fall back to PIO mode */
1135 res
= platform_get_resource(pdev
, IORESOURCE_DMA
, 0);
1137 host
->dmareq
= res
->start
;
1138 host
->dma_data
.peripheral_type
= IMX_DMATYPE_SDHC
;
1139 host
->dma_data
.priority
= DMA_PRIO_LOW
;
1140 host
->dma_data
.dma_request
= host
->dmareq
;
1142 dma_cap_set(DMA_SLAVE
, mask
);
1143 host
->dma
= dma_request_channel(mask
, filter
, host
);
1147 mmc
->max_seg_size
= dma_get_max_seg_size(
1148 host
->dma
->device
->dev
);
1150 dev_info(mmc_dev(host
->mmc
), "dma not available. Using PIO\n");
1152 INIT_WORK(&host
->datawork
, mxcmci_datawork
);
1154 ret
= devm_request_irq(&pdev
->dev
, irq
, mxcmci_irq
, 0,
1155 dev_name(&pdev
->dev
), host
);
1159 platform_set_drvdata(pdev
, mmc
);
1161 if (host
->pdata
&& host
->pdata
->init
) {
1162 ret
= host
->pdata
->init(&pdev
->dev
, mxcmci_detect_irq
,
1168 timer_setup(&host
->watchdog
, mxcmci_watchdog
, 0);
1176 dma_release_channel(host
->dma
);
1179 clk_disable_unprepare(host
->clk_ipg
);
1181 clk_disable_unprepare(host
->clk_per
);
1189 static int mxcmci_remove(struct platform_device
*pdev
)
1191 struct mmc_host
*mmc
= platform_get_drvdata(pdev
);
1192 struct mxcmci_host
*host
= mmc_priv(mmc
);
1194 mmc_remove_host(mmc
);
1196 if (host
->pdata
&& host
->pdata
->exit
)
1197 host
->pdata
->exit(&pdev
->dev
, mmc
);
1200 dma_release_channel(host
->dma
);
1202 clk_disable_unprepare(host
->clk_per
);
1203 clk_disable_unprepare(host
->clk_ipg
);
#ifdef CONFIG_PM_SLEEP
/* Suspend: gate both controller clocks. */
static int mxcmci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);

	clk_disable_unprepare(host->clk_per);
	clk_disable_unprepare(host->clk_ipg);
	return 0;
}

/* Resume: re-enable both clocks, undoing the first on failure of the second. */
static int mxcmci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret;

	ret = clk_prepare_enable(host->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(host->clk_ipg);
	if (ret)
		clk_disable_unprepare(host->clk_per);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1241 static struct platform_driver mxcmci_driver
= {
1242 .probe
= mxcmci_probe
,
1243 .remove
= mxcmci_remove
,
1244 .id_table
= mxcmci_devtype
,
1246 .name
= DRIVER_NAME
,
1247 .pm
= &mxcmci_pm_ops
,
1248 .of_match_table
= mxcmci_of_match
,
1252 module_platform_driver(mxcmci_driver
);
1254 MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1255 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1256 MODULE_LICENSE("GPL");
1257 MODULE_ALIAS("platform:mxc-mmc");