// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
 *
 * This is a driver for the SDHC controller found in Freescale MX2/MX3
 * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
 * Unlike the hardware found on MX1, this hardware just works and does
 * not need all the quirks found in imxmmc.c, hence the separate driver.
 *
 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
 *
 * derived from pxamci.c by Russell King
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/mmc/slot-gpio.h>

#include <linux/platform_data/mmc-mxcmmc.h>
#include <linux/platform_data/dma-imx.h>
#define DRIVER_NAME "mxc-mmc"
#define MXCMCI_TIMEOUT_MS 10000

#define MMC_REG_STR_STP_CLK		0x00
#define MMC_REG_STATUS			0x04
#define MMC_REG_CLK_RATE		0x08
#define MMC_REG_CMD_DAT_CONT		0x0C
#define MMC_REG_RES_TO			0x10
#define MMC_REG_READ_TO			0x14
#define MMC_REG_BLK_LEN			0x18
#define MMC_REG_NOB			0x1C
#define MMC_REG_REV_NO			0x20
#define MMC_REG_INT_CNTR		0x24
#define MMC_REG_CMD			0x28
#define MMC_REG_ARG			0x2C
#define MMC_REG_RES_FIFO		0x34
#define MMC_REG_BUFFER_ACCESS		0x38

#define STR_STP_CLK_RESET		(1 << 3)
#define STR_STP_CLK_START_CLK		(1 << 1)
#define STR_STP_CLK_STOP_CLK		(1 << 0)

#define STATUS_CARD_INSERTION		(1 << 31)
#define STATUS_CARD_REMOVAL		(1 << 30)
#define STATUS_YBUF_EMPTY		(1 << 29)
#define STATUS_XBUF_EMPTY		(1 << 28)
#define STATUS_YBUF_FULL		(1 << 27)
#define STATUS_XBUF_FULL		(1 << 26)
#define STATUS_BUF_UND_RUN		(1 << 25)
#define STATUS_BUF_OVFL			(1 << 24)
#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
#define STATUS_END_CMD_RESP		(1 << 13)
#define STATUS_WRITE_OP_DONE		(1 << 12)
#define STATUS_DATA_TRANS_DONE		(1 << 11)
#define STATUS_READ_OP_DONE		(1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
#define STATUS_BUF_READ_RDY		(1 << 7)
#define STATUS_BUF_WRITE_RDY		(1 << 6)
#define STATUS_RESP_CRC_ERR		(1 << 5)
#define STATUS_CRC_READ_ERR		(1 << 3)
#define STATUS_CRC_WRITE_ERR		(1 << 2)
#define STATUS_TIME_OUT_RESP		(1 << 1)
#define STATUS_TIME_OUT_READ		(1 << 0)
#define STATUS_ERR_MASK			0x2f
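/*
 * STATUS_ERR_MASK collapses the error bits defined above into one constant:
 * STATUS_RESP_CRC_ERR (0x20) | STATUS_CRC_READ_ERR (0x08) |
 * STATUS_CRC_WRITE_ERR (0x04) | STATUS_TIME_OUT_RESP (0x02) |
 * STATUS_TIME_OUT_READ (0x01) = 0x2f.
 */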
#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
#define CMD_DAT_CONT_INIT		(1 << 7)
#define CMD_DAT_CONT_WRITE		(1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)

#define INT_SDIO_INT_WKP_EN		(1 << 18)
#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
#define INT_CARD_INSERTION_EN		(1 << 15)
#define INT_CARD_REMOVAL_EN		(1 << 14)
#define INT_SDIO_IRQ_EN			(1 << 13)
#define INT_DAT0_EN			(1 << 12)
#define INT_BUF_READ_EN			(1 << 4)
#define INT_BUF_WRITE_EN		(1 << 3)
#define INT_END_CMD_RES_EN		(1 << 2)
#define INT_WRITE_OP_DONE_EN		(1 << 1)
#define INT_READ_OP_EN			(1 << 0)
enum mxcmci_type {
	IMX21_MMC,
	IMX31_MMC,
	MPC512X_MMC,
};

struct mxcmci_host {
	struct mmc_host		*mmc;
	void __iomem		*base;
	dma_addr_t		phys_base;

	struct dma_chan		*dma;
	struct dma_async_tx_descriptor *desc;
	int			do_dma;
	int			default_irq_mask;
	int			use_sdio;
	unsigned int		power_mode;
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned int		datasize;
	unsigned int		dma_dir;

	u16			rev_no;
	unsigned int		cmdat;

	struct clk		*clk_ipg;
	struct clk		*clk_per;

	int			clock;

	struct work_struct	datawork;
	spinlock_t		lock;

	int			burstlen;
	int			dmareq;
	struct dma_slave_config dma_slave_config;
	struct imx_dma_data	dma_data;

	struct timer_list	watchdog;
	enum mxcmci_type	devtype;
};
static const struct of_device_id mxcmci_of_match[] = {
	{
		.compatible = "fsl,imx21-mmc",
		.data = (void *) IMX21_MMC,
	}, {
		.compatible = "fsl,imx31-mmc",
		.data = (void *) IMX31_MMC,
	}, {
		.compatible = "fsl,mpc5121-sdhc",
		.data = (void *) MPC512X_MMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, mxcmci_of_match);
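/*
 * The .data field of each entry above carries the controller variant; probe
 * reads it back via of_device_get_match_data() and stores it in host->devtype,
 * which is what is_imx31_mmc() and is_mpc512x_mmc() test.
 */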
static inline int is_imx31_mmc(struct mxcmci_host *host)
{
	return host->devtype == IMX31_MMC;
}

static inline int is_mpc512x_mmc(struct mxcmci_host *host)
{
	return host->devtype == MPC512X_MMC;
}
static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		return ioread32be(host->base + reg);
	else
		return readl(host->base + reg);
}

static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		iowrite32be(val, host->base + reg);
	else
		writel(val, host->base + reg);
}

static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		return ioread32be(host->base + reg);
	else
		return readw(host->base + reg);
}

static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		iowrite32be(val, host->base + reg);
	else
		writew(val, host->base + reg);
}
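/*
 * The accessors above select the byte order at compile time: MPC512x kernels
 * (CONFIG_PPC_MPC512x) go through the big-endian ioread32be()/iowrite32be()
 * helpers, while i.MX builds use the native little-endian readl()/writel().
 * Note that even the 16-bit variants issue 32-bit accesses on MPC512x,
 * presumably because that SoC exposes the SDHC registers as 32-bit words.
 */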
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		if (host->power_mode == MMC_POWER_UP)
			mmc_regulator_set_ocr(host->mmc,
					      host->mmc->supply.vmmc, vdd);
		else if (host->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc,
					      host->mmc->supply.vmmc, 0);
	}

	if (host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
	return host->do_dma;
}
static void mxcmci_softreset(struct mxcmci_host *host)
{
	int i;

	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");

	/* reset sequence */
	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
			MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);

	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
}
#if IS_ENABLED(CONFIG_PPC_MPC512x)
static inline void buffer_swap32(u32 *buf, int len)
{
	int i;

	for (i = 0; i < ((len + 3) / 4); i++) {
		*buf = swab32(*buf);
		buf++;
	}
}

static void mxcmci_swap_buffers(struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->sg_len, i)
		buffer_swap32(sg_virt(sg), sg->length);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
#endif
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasize = nob * blksz;
	struct scatterlist *sg;
	enum dma_transfer_direction slave_dirn;
	int i, nents;

	host->data = data;
	data->bytes_xfered = 0;

	mxcmci_writew(host, nob, MMC_REG_NOB);
	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
	host->datasize = datasize;

	if (!mxcmci_use_dma(host))
		return 0;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
			host->do_dma = 0;
			return 0;
		}
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;

		mxcmci_swap_buffers(data);
	}

	nents = dma_map_sg(host->dma->device->dev, data->sg,
				data->sg_len, host->dma_dir);
	if (nents != data->sg_len)
		return -EINVAL;

	host->desc = dmaengine_prep_slave_sg(host->dma,
		data->sg, data->sg_len, slave_dirn,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!host->desc) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		host->do_dma = 0;
		return 0; /* Fall back to PIO */
	}

	dmaengine_submit(host->desc);
	dma_async_issue_pending(host->dma);

	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));

	return 0;
}
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
static void mxcmci_dma_callback(void *data)
{
	struct mxcmci_host *host = data;
	u32 stat;

	del_timer(&host->watchdog);

	stat = mxcmci_readl(host, MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	mxcmci_data_done(host, stat);
}
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
		unsigned int cmdat)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
		break;
	case MMC_RSP_NONE:
		break;
	default:
		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
				mmc_resp_type(cmd));
		cmd->error = -EINVAL;
		return -EINVAL;
	}

	int_cntr = INT_END_CMD_RES_EN;

	if (mxcmci_use_dma(host)) {
		if (host->dma_dir == DMA_FROM_DEVICE) {
			host->desc->callback = mxcmci_dma_callback;
			host->desc->callback_param = host;
		} else {
			int_cntr |= INT_WRITE_OP_DONE_EN;
		}
	}

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);

	return 0;
}
static void mxcmci_finish_request(struct mxcmci_host *host,
		struct mmc_request *req)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;

	mmc_request_done(host->mmc, req);
}
static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (mxcmci_use_dma(host)) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		mxcmci_swap_buffers(data);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
				stat);
		if (stat & STATUS_CRC_READ_ERR) {
			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
			data->error = -EILSEQ;
		} else if (stat & STATUS_CRC_WRITE_ERR) {
			u32 err_code = (stat >> 9) & 0x3;
			if (err_code == 2) { /* No CRC response */
				dev_err(mmc_dev(host->mmc),
					"%s: No CRC -ETIMEDOUT\n", __func__);
				data->error = -ETIMEDOUT;
			} else {
				dev_err(mmc_dev(host->mmc),
					"%s: -EILSEQ\n", __func__);
				data->error = -EILSEQ;
			}
		} else if (stat & STATUS_TIME_OUT_READ) {
			dev_err(mmc_dev(host->mmc),
				"%s: read -ETIMEDOUT\n", __func__);
			data->error = -ETIMEDOUT;
		} else {
			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
			data->error = -EIO;
		}
	} else {
		data->bytes_xfered = host->datasize;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;

	if (!cmd)
		return;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}
}
static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
{
	u32 stat;
	unsigned long timeout = jiffies + HZ;

	do {
		stat = mxcmci_readl(host, MMC_REG_STATUS);
		if (stat & STATUS_ERR_MASK)
			return stat;
		if (time_after(jiffies, timeout)) {
			mxcmci_softreset(host);
			mxcmci_set_clk_rate(host, host->clock);
			return STATUS_TIME_OUT_READ;
		}
		if (stat & mask)
			return 0;
		cpu_relax();
	} while (1);
}
static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
		memcpy(b, &tmp, bytes);
	}

	return 0;
}
static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;
		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;

		memcpy(&tmp, b, bytes);
		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
	}

	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
}
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
	struct mmc_data *data = host->req->data;
	struct scatterlist *sg;
	int stat, i;

	host->data = data;
	host->datasize = 0;

	if (data->flags & MMC_DATA_READ) {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
	} else {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_push(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
		if (stat)
			return stat;
	}
	return 0;
}
static void mxcmci_datawork(struct work_struct *work)
{
	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
						  datawork);
	int datastat = mxcmci_transfer_data(host);

	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
		MMC_REG_STATUS);
	mxcmci_finish_data(host, datastat);

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_request *req;
	int data_error;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	if (!host->req) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	req = host->req;
	if (!req->stop)
		host->req = NULL; /* we will handle finish req below */

	data_error = mxcmci_finish_data(host, stat);

	spin_unlock_irqrestore(&host->lock, flags);

	if (data_error)
		return;

	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (req->stop) {
		if (mxcmci_start_cmd(host, req->stop, 0)) {
			mxcmci_finish_request(host, req);
			return;
		}
	} else {
		mxcmci_finish_request(host, req);
	}
}
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (!host->data && host->req) {
		mxcmci_finish_request(host, host->req);
		return;
	}

	/* For the DMA case the DMA engine handles the data transfer
	 * automatically. For non DMA we have to do it ourselves.
	 * Don't do it in interrupt context though.
	 */
	if (!mxcmci_use_dma(host) && host->data)
		schedule_work(&host->datawork);
}
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
	struct mxcmci_host *host = devid;
	bool sdio_irq;
	u32 stat;

	stat = mxcmci_readl(host, MMC_REG_STATUS);
	mxcmci_writel(host,
		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
			 STATUS_WRITE_OP_DONE),
		MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	spin_lock(&host->lock);
	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
	spin_unlock(&host->lock);

	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);

	if (sdio_irq) {
		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
		mmc_signal_sdio_irq(host->mmc);
	}

	if (stat & STATUS_END_CMD_RESP)
		mxcmci_cmd_done(host, stat);

	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
		del_timer(&host->watchdog);
		mxcmci_data_done(host, stat);
	}

	if (host->default_irq_mask &&
		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}
static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned int cmdat = host->cmdat;
	int error;

	WARN_ON(host->req != NULL);

	host->req = req;
	host->cmdat &= ~CMD_DAT_CONT_INIT;

	if (host->dma)
		host->do_dma = 1;

	if (req->data) {
		error = mxcmci_setup_data(host, req->data);
		if (error) {
			req->cmd->error = error;
			goto out;
		}

		cmdat |= CMD_DAT_CONT_DATA_ENABLE;

		if (req->data->flags & MMC_DATA_WRITE)
			cmdat |= CMD_DAT_CONT_WRITE;
	}

	error = mxcmci_start_cmd(host, req->cmd, cmdat);

out:
	if (error)
		mxcmci_finish_request(host, req);
}
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
	unsigned int divider;
	int prescaler = 0;
	unsigned int clk_in = clk_get_rate(host->clk_per);

	while (prescaler <= 0x800) {
		for (divider = 1; divider <= 0xF; divider++) {
			int x;

			x = (clk_in / (divider + 1));

			if (prescaler)
				x /= (prescaler * 2);

			if (x <= clk_ios)
				break;
		}
		if (divider < 0x10)
			break;

		if (prescaler == 0)
			prescaler = 1;
		else
			prescaler <<= 1;
	}

	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);

	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
			prescaler, divider, clk_in, clk_ios);
}
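/*
 * Worked example (illustrative, assuming a 50 MHz clk_per and a requested
 * 25 MHz card clock, and assuming the search above stops at the first
 * prescaler/divider pair whose output does not exceed the request):
 * prescaler = 0, divider = 1 gives 50 MHz / (1 + 1) = 25 MHz, so
 * MMC_REG_CLK_RATE is programmed with (0 << 4) | 1.
 */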
static int mxcmci_setup_dma(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	struct dma_slave_config *config = &host->dma_slave_config;

	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
	config->dst_addr_width = 4;
	config->src_addr_width = 4;
	config->dst_maxburst = host->burstlen;
	config->src_maxburst = host->burstlen;
	config->device_fc = false;

	return dmaengine_slave_config(host->dma, config);
}
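/*
 * Both DMA directions target the same BUFFER_ACCESS FIFO register; only the
 * burst length differs, taken from host->burstlen, which mxcmci_set_ios()
 * below picks based on the current bus width.
 */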
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	int burstlen, ret;

	/*
	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		burstlen = 16;
	else
		burstlen = 4;

	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
		host->burstlen = burstlen;
		ret = mxcmci_setup_dma(mmc);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"failed to config DMA channel. Falling back to PIO\n");
			dma_release_channel(host->dma);
			host->do_dma = 0;
			host->dma = NULL;
		}
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
	else
		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;
		mxcmci_set_power(host, ios->vdd);

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMD_DAT_CONT_INIT;
	}

	if (ios->clock) {
		mxcmci_set_clk_rate(host, ios->clock);
		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
	} else {
		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
	}

	host->clock = ios->clock;
}
static irqreturn_t mxcmci_detect_irq(int irq, void *data)
{
	struct mmc_host *mmc = data;

	dev_dbg(mmc_dev(mmc), "%s\n", __func__);

	mmc_detect_change(mmc, msecs_to_jiffies(250));
	return IRQ_HANDLED;
}
static int mxcmci_get_ro(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * If board doesn't support read only detection (no mmc_gpio
	 * context or gpio is invalid), then let the mmc core decide.
	 */
	return mmc_gpio_get_ro(mmc);
}
static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 int_cntr;

	spin_lock_irqsave(&host->lock, flags);
	host->use_sdio = enable;
	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);

	if (enable)
		int_cntr |= INT_SDIO_IRQ_EN;
	else
		int_cntr &= ~INT_SDIO_IRQ_EN;

	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mxcmci_host *mxcmci = mmc_priv(host);

	/*
	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
	 * multi-block transfers when connected SDIO peripheral doesn't
	 * drive the BUSY line as required by the specs.
	 * One way to prevent this is to only allow 1-bit transfers.
	 */
	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
	else
		host->caps |= MMC_CAP_4_BIT_DATA;
}
static bool filter(struct dma_chan *chan, void *param)
{
	struct mxcmci_host *host = param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = &host->dma_data;

	return true;
}
static void mxcmci_watchdog(struct timer_list *t)
{
	struct mxcmci_host *host = from_timer(host, t, watchdog);
	struct mmc_request *req = host->req;
	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);

	if (host->dma_dir == DMA_FROM_DEVICE) {
		dmaengine_terminate_all(host->dma);
		dev_err(mmc_dev(host->mmc),
			"%s: read time out (status = 0x%08x)\n",
			__func__, stat);
	} else {
		dev_err(mmc_dev(host->mmc),
			"%s: write time out (status = 0x%08x)\n",
			__func__, stat);
		mxcmci_softreset(host);
	}

	/* Mark transfer as erroneous and inform the upper layers */
	if (host->data)
		host->data->error = -ETIMEDOUT;
	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, req);
}
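/*
 * The watchdog above is armed in mxcmci_setup_data() when a DMA descriptor is
 * submitted, and cancelled from mxcmci_dma_callback() or the interrupt handler
 * once the transfer completes. If it fires, the request is marked as timed out
 * and completed towards the core so that a stuck DMA never hangs the stack.
 */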
static const struct mmc_host_ops mxcmci_ops = {
	.request		= mxcmci_request,
	.set_ios		= mxcmci_set_ios,
	.get_ro			= mxcmci_get_ro,
	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
	.init_card		= mxcmci_init_card,
};
static int mxcmci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct mxcmci_host *host;
	struct resource *res;
	int ret = 0, irq;
	bool dat3_card_detect = false;
	dma_cap_mask_t mask;
	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;

	pr_info("i.MX/MPC512x SDHC driver\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);

	host->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out_free;
	}

	host->phys_base = res->start;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free;
	mmc->ops = &mxcmci_ops;

	/* For devicetree parsing, the bus width is read from devicetree */
	if (pdata)
		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
	else
		mmc->caps |= MMC_CAP_SDIO_IRQ;

	/* MMC core transfer sizes tunable parameters */
	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	host->devtype = (enum mxcmci_type)of_device_get_match_data(&pdev->dev);

	/* adjust max_segs after devtype detection */
	if (!is_mpc512x_mmc(host))
		mmc->max_segs = 64;

	host->mmc = mmc;
	host->pdata = pdata;
	spin_lock_init(&host->lock);

	if (pdata)
		dat3_card_detect = pdata->dat3_card_detect;
	else if (mmc_card_is_removable(mmc)
			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
		dat3_card_detect = true;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto out_free;

	if (!mmc->ocr_avail) {
		if (pdata && pdata->ocr_avail)
			mmc->ocr_avail = pdata->ocr_avail;
		else
			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	}

	if (dat3_card_detect)
		host->default_irq_mask =
			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
	else
		host->default_irq_mask = 0;

	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(host->clk_ipg)) {
		ret = PTR_ERR(host->clk_ipg);
		goto out_free;
	}

	host->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(host->clk_per)) {
		ret = PTR_ERR(host->clk_per);
		goto out_free;
	}

	ret = clk_prepare_enable(host->clk_per);
	if (ret)
		goto out_free;

	ret = clk_prepare_enable(host->clk_ipg);
	if (ret)
		goto out_clk_per_put;

	mxcmci_softreset(host);

	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
	if (host->rev_no != 0x400) {
		ret = -ENODEV;
		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
			host->rev_no);
		goto out_clk_put;
	}

	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
	mmc->f_max = clk_get_rate(host->clk_per) >> 1;

	/* recommended in data sheet */
	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);

	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);

	if (!host->pdata) {
		host->dma = dma_request_chan(&pdev->dev, "rx-tx");
		if (IS_ERR(host->dma)) {
			if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
				ret = -EPROBE_DEFER;
				goto out_clk_put;
			}

			/* Ignore errors to fall back to PIO mode */
			host->dma = NULL;
		}
	} else {
		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (res) {
			host->dmareq = res->start;
			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
			host->dma_data.priority = DMA_PRIO_LOW;
			host->dma_data.dma_request = host->dmareq;
			dma_cap_zero(mask);
			dma_cap_set(DMA_SLAVE, mask);
			host->dma = dma_request_channel(mask, filter, host);
		}
	}
	if (host->dma)
		mmc->max_seg_size = dma_get_max_seg_size(
				host->dma->device->dev);
	else
		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");

	INIT_WORK(&host->datawork, mxcmci_datawork);

	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	platform_set_drvdata(pdev, mmc);

	if (host->pdata && host->pdata->init) {
		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
				host->mmc);
		if (ret)
			goto out_free_dma;
	}

	timer_setup(&host->watchdog, mxcmci_watchdog, 0);

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	return 0;

out_free_dma:
	if (host->dma)
		dma_release_channel(host->dma);

out_clk_put:
	clk_disable_unprepare(host->clk_ipg);
out_clk_per_put:
	clk_disable_unprepare(host->clk_per);

out_free:
	mmc_free_host(mmc);

	return ret;
}
static int mxcmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxcmci_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);

	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&pdev->dev, mmc);

	if (host->dma)
		dma_release_channel(host->dma);

	clk_disable_unprepare(host->clk_per);
	clk_disable_unprepare(host->clk_ipg);

	mmc_free_host(mmc);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mxcmci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);

	clk_disable_unprepare(host->clk_per);
	clk_disable_unprepare(host->clk_ipg);
	return 0;
}

static int mxcmci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret;

	ret = clk_prepare_enable(host->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(host->clk_ipg);
	if (ret)
		clk_disable_unprepare(host->clk_per);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
	.probe		= mxcmci_probe,
	.remove		= mxcmci_remove,
	.driver		= {
		.name		= DRIVER_NAME,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.pm		= &mxcmci_pm_ops,
		.of_match_table	= mxcmci_of_match,
	},
};

module_platform_driver(mxcmci_driver);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxc-mmc");