/*
 * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
 *
 * This is a driver for the SDHC controller found in Freescale MX2/MX3
 * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
 * Unlike the hardware found on MX1, this hardware just works and does
 * not need all the quirks found in imxmmc.c, hence the separate driver.
 *
 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
 *
 * derived from pxamci.c by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include <linux/platform_data/mmc-mxcmmc.h>

#include <linux/platform_data/dma-imx.h>
#define DRIVER_NAME			"mxc-mmc"
#define MXCMCI_TIMEOUT_MS		10000
#define MMC_REG_STR_STP_CLK		0x00
#define MMC_REG_STATUS			0x04
#define MMC_REG_CLK_RATE		0x08
#define MMC_REG_CMD_DAT_CONT		0x0C
#define MMC_REG_RES_TO			0x10
#define MMC_REG_READ_TO			0x14
#define MMC_REG_BLK_LEN			0x18
#define MMC_REG_NOB			0x1C
#define MMC_REG_REV_NO			0x20
#define MMC_REG_INT_CNTR		0x24
#define MMC_REG_CMD			0x28
#define MMC_REG_ARG			0x2C
#define MMC_REG_RES_FIFO		0x34
#define MMC_REG_BUFFER_ACCESS		0x38
#define STR_STP_CLK_RESET		(1 << 3)
#define STR_STP_CLK_START_CLK		(1 << 1)
#define STR_STP_CLK_STOP_CLK		(1 << 0)

#define STATUS_CARD_INSERTION		(1 << 31)
#define STATUS_CARD_REMOVAL		(1 << 30)
#define STATUS_YBUF_EMPTY		(1 << 29)
#define STATUS_XBUF_EMPTY		(1 << 28)
#define STATUS_YBUF_FULL		(1 << 27)
#define STATUS_XBUF_FULL		(1 << 26)
#define STATUS_BUF_UND_RUN		(1 << 25)
#define STATUS_BUF_OVFL			(1 << 24)
#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
#define STATUS_END_CMD_RESP		(1 << 13)
#define STATUS_WRITE_OP_DONE		(1 << 12)
#define STATUS_DATA_TRANS_DONE		(1 << 11)
#define STATUS_READ_OP_DONE		(1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
#define STATUS_BUF_READ_RDY		(1 << 7)
#define STATUS_BUF_WRITE_RDY		(1 << 6)
#define STATUS_RESP_CRC_ERR		(1 << 5)
#define STATUS_CRC_READ_ERR		(1 << 3)
#define STATUS_CRC_WRITE_ERR		(1 << 2)
#define STATUS_TIME_OUT_RESP		(1 << 1)
#define STATUS_TIME_OUT_READ		(1 << 0)
#define STATUS_ERR_MASK			0x2f

#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
#define CMD_DAT_CONT_INIT		(1 << 7)
#define CMD_DAT_CONT_WRITE		(1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)

#define INT_SDIO_INT_WKP_EN		(1 << 18)
#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
#define INT_CARD_INSERTION_EN		(1 << 15)
#define INT_CARD_REMOVAL_EN		(1 << 14)
#define INT_SDIO_IRQ_EN			(1 << 13)
#define INT_DAT0_EN			(1 << 12)
#define INT_BUF_READ_EN			(1 << 4)
#define INT_BUF_WRITE_EN		(1 << 3)
#define INT_END_CMD_RES_EN		(1 << 2)
#define INT_WRITE_OP_DONE_EN		(1 << 1)
#define INT_READ_OP_EN			(1 << 0)
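
/*
 * Note: STATUS_DATA_TRANS_DONE and STATUS_READ_OP_DONE above are two names
 * for the same status bit (bit 11); the code below uses whichever name
 * matches the direction of the transfer it is completing.
 */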
enum mxcmci_type {
	IMX21_MMC,
	IMX31_MMC,
	MPC512X_MMC,
};

struct mxcmci_host {
	struct mmc_host		*mmc;
	void __iomem		*base;
	dma_addr_t		phys_base;

	struct dma_chan		*dma;
	struct dma_async_tx_descriptor *desc;
	int			do_dma;
	int			default_irq_mask;
	int			use_sdio;
	unsigned int		power_mode;
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned int		datasize;
	unsigned int		dma_dir;

	u16			rev_no;
	unsigned int		cmdat;

	struct clk		*clk_ipg;
	struct clk		*clk_per;

	int			clock;

	struct work_struct	datawork;
	spinlock_t		lock;

	int			burstlen;
	int			dmareq;
	struct dma_slave_config dma_slave_config;
	struct imx_dma_data	dma_data;

	struct timer_list	watchdog;
	enum mxcmci_type	devtype;
};
static const struct platform_device_id mxcmci_devtype[] = {
	{
		.name = "imx21-mmc",
		.driver_data = IMX21_MMC,
	}, {
		.name = "imx31-mmc",
		.driver_data = IMX31_MMC,
	}, {
		.name = "mpc512x-sdhc",
		.driver_data = MPC512X_MMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
static const struct of_device_id mxcmci_of_match[] = {
	{
		.compatible = "fsl,imx21-mmc",
		.data = &mxcmci_devtype[IMX21_MMC],
	}, {
		.compatible = "fsl,imx31-mmc",
		.data = &mxcmci_devtype[IMX31_MMC],
	}, {
		.compatible = "fsl,mpc5121-sdhc",
		.data = &mxcmci_devtype[MPC512X_MMC],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, mxcmci_of_match);
static inline int is_imx31_mmc(struct mxcmci_host *host)
{
	return host->devtype == IMX31_MMC;
}

static inline int is_mpc512x_mmc(struct mxcmci_host *host)
{
	return host->devtype == MPC512X_MMC;
}
static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		return ioread32be(host->base + reg);
	else
		return readl(host->base + reg);
}

static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		iowrite32be(val, host->base + reg);
	else
		writel(val, host->base + reg);
}

static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		return ioread32be(host->base + reg);
	else
		return readw(host->base + reg);
}

static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
{
	if (IS_ENABLED(CONFIG_PPC_MPC512x))
		iowrite32be(val, host->base + reg);
	else
		writew(val, host->base + reg);
}
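
/*
 * On MPC512x the SDHC register block is big-endian and is always accessed
 * as 32-bit quantities, which is why even the 16-bit accessors above use
 * ioread32be()/iowrite32be() when CONFIG_PPC_MPC512x is enabled.
 */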
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		if (host->power_mode == MMC_POWER_UP)
			mmc_regulator_set_ocr(host->mmc,
					      host->mmc->supply.vmmc, vdd);
		else if (host->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc,
					      host->mmc->supply.vmmc, 0);
	}

	if (host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
	return host->do_dma;
}
static void mxcmci_softreset(struct mxcmci_host *host)
{
	int i;

	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");

	/* reset sequence */
	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
			MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);

	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
}
#if IS_ENABLED(CONFIG_PPC_MPC512x)
static inline void buffer_swap32(u32 *buf, int len)
{
	int i;

	for (i = 0; i < ((len + 3) / 4); i++) {
		*buf = swab32(*buf);
		buf++;
	}
}

static void mxcmci_swap_buffers(struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		void *buf = kmap_atomic(sg_page(sg) + sg->offset);
		buffer_swap32(buf, sg->length);
		kunmap_atomic(buf);
	}
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
#endif
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasize = nob * blksz;
	struct scatterlist *sg;
	enum dma_transfer_direction slave_dirn;
	int i, nents;

	host->data = data;
	data->bytes_xfered = 0;

	mxcmci_writew(host, nob, MMC_REG_NOB);
	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
	host->datasize = datasize;

	if (!mxcmci_use_dma(host))
		return 0;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
			host->do_dma = 0;
			return 0;
		}
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;

		mxcmci_swap_buffers(data);
	}

	nents = dma_map_sg(host->dma->device->dev, data->sg,
				data->sg_len, host->dma_dir);
	if (nents != data->sg_len)
		return -EINVAL;

	host->desc = dmaengine_prep_slave_sg(host->dma,
		data->sg, data->sg_len, slave_dirn,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!host->desc) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		host->do_dma = 0;
		return 0; /* Fall back to PIO */
	}
	wmb();

	dmaengine_submit(host->desc);
	dma_async_issue_pending(host->dma);

	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));

	return 0;
}
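
/*
 * DMA is only attempted when every scatterlist entry is 32-bit aligned and
 * at least 512 bytes long; anything smaller or misaligned clears do_dma so
 * the transfer falls back to the PIO path driven from mxcmci_datawork().
 */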
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
static void mxcmci_dma_callback(void *data)
{
	struct mxcmci_host *host = data;
	u32 stat;

	del_timer(&host->watchdog);

	stat = mxcmci_readl(host, MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	mxcmci_data_done(host, stat);
}
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
		unsigned int cmdat)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
		break;
	case MMC_RSP_NONE:
		break;
	default:
		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
				mmc_resp_type(cmd));
		cmd->error = -EINVAL;
		return -EINVAL;
	}

	int_cntr = INT_END_CMD_RES_EN;

	if (mxcmci_use_dma(host)) {
		if (host->dma_dir == DMA_FROM_DEVICE) {
			host->desc->callback = mxcmci_dma_callback;
			host->desc->callback_param = host;
		} else {
			int_cntr |= INT_WRITE_OP_DONE_EN;
		}
	}

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);

	return 0;
}
static void mxcmci_finish_request(struct mxcmci_host *host,
		struct mmc_request *req)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;

	mmc_request_done(host->mmc, req);
}
static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (mxcmci_use_dma(host)) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		mxcmci_swap_buffers(data);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
				stat);
		if (stat & STATUS_CRC_READ_ERR) {
			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
			data->error = -EILSEQ;
		} else if (stat & STATUS_CRC_WRITE_ERR) {
			u32 err_code = (stat >> 9) & 0x3;
			if (err_code == 2) { /* No CRC response */
				dev_err(mmc_dev(host->mmc),
					"%s: No CRC -ETIMEDOUT\n", __func__);
				data->error = -ETIMEDOUT;
			} else {
				dev_err(mmc_dev(host->mmc),
					"%s: -EILSEQ\n", __func__);
				data->error = -EILSEQ;
			}
		} else if (stat & STATUS_TIME_OUT_READ) {
			dev_err(mmc_dev(host->mmc),
				"%s: read -ETIMEDOUT\n", __func__);
			data->error = -ETIMEDOUT;
		} else {
			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
			data->error = -EIO;
		}
	} else {
		data->bytes_xfered = host->datasize;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;

	if (!cmd)
		return;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}
}
static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
{
	u32 stat;
	unsigned long timeout = jiffies + HZ;

	do {
		stat = mxcmci_readl(host, MMC_REG_STATUS);
		if (stat & STATUS_ERR_MASK)
			return stat;
		if (time_after(jiffies, timeout)) {
			mxcmci_softreset(host);
			mxcmci_set_clk_rate(host, host->clock);
			return STATUS_TIME_OUT_READ;
		}
		if (stat & mask)
			return 0;
		cpu_relax();
	} while (1);
}
static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
		memcpy(b, &tmp, bytes);
	}

	return 0;
}
static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;
		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;

		memcpy(&tmp, b, bytes);
		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
	}

	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
}
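
/*
 * Both PIO helpers above move data through the 32-bit BUFFER_ACCESS FIFO;
 * a trailing partial word is staged in a temporary and copied with memcpy()
 * so that only whole 32-bit words are ever read from or written to the
 * register.
 */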
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
	struct mmc_data *data = host->req->data;
	struct scatterlist *sg;
	void *buf;
	int stat, i;

	host->data = data;
	host->datasize = 0;

	if (data->flags & MMC_DATA_READ) {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			buf = kmap_atomic(sg_page(sg) + sg->offset);
			stat = mxcmci_pull(host, buf, sg->length);
			kunmap_atomic(buf);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
	} else {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			buf = kmap_atomic(sg_page(sg) + sg->offset);
			stat = mxcmci_push(host, buf, sg->length);
			kunmap_atomic(buf);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
		if (stat)
			return stat;
	}
	return 0;
}
static void mxcmci_datawork(struct work_struct *work)
{
	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
						  datawork);
	int datastat = mxcmci_transfer_data(host);

	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
		MMC_REG_STATUS);
	mxcmci_finish_data(host, datastat);

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_request *req;
	int data_error;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	if (!host->req) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	req = host->req;
	if (!req->stop)
		host->req = NULL; /* we will handle finish req below */

	data_error = mxcmci_finish_data(host, stat);

	spin_unlock_irqrestore(&host->lock, flags);

	if (data_error)
		return;

	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (req->stop) {
		if (mxcmci_start_cmd(host, req->stop, 0)) {
			mxcmci_finish_request(host, req);
			return;
		}
	} else {
		mxcmci_finish_request(host, req);
	}
}
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (!host->data && host->req) {
		mxcmci_finish_request(host, host->req);
		return;
	}

	/* For the DMA case the DMA engine handles the data transfer
	 * automatically. For non DMA we have to do it ourselves.
	 * Don't do it in interrupt context though.
	 */
	if (!mxcmci_use_dma(host) && host->data)
		schedule_work(&host->datawork);
}
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
	struct mxcmci_host *host = devid;
	bool sdio_irq;
	u32 stat;

	stat = mxcmci_readl(host, MMC_REG_STATUS);
	mxcmci_writel(host,
		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
			 STATUS_WRITE_OP_DONE),
		MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	spin_lock(&host->lock);
	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
	spin_unlock(&host->lock);

	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);

	if (sdio_irq) {
		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
		mmc_signal_sdio_irq(host->mmc);
	}

	if (stat & STATUS_END_CMD_RESP)
		mxcmci_cmd_done(host, stat);

	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
		del_timer(&host->watchdog);
		mxcmci_data_done(host, stat);
	}

	if (host->default_irq_mask &&
		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}
static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned int cmdat = host->cmdat;
	int error;

	WARN_ON(host->req != NULL);

	host->req = req;
	host->cmdat &= ~CMD_DAT_CONT_INIT;

	if (host->dma)
		host->do_dma = 1;

	if (req->data) {
		error = mxcmci_setup_data(host, req->data);
		if (error) {
			req->cmd->error = error;
			goto out;
		}

		cmdat |= CMD_DAT_CONT_DATA_ENABLE;

		if (req->data->flags & MMC_DATA_WRITE)
			cmdat |= CMD_DAT_CONT_WRITE;
	}

	error = mxcmci_start_cmd(host, req->cmd, cmdat);

out:
	if (error)
		mxcmci_finish_request(host, req);
}
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
	unsigned int divider;
	int prescaler = 0;
	unsigned int clk_in = clk_get_rate(host->clk_per);

	while (prescaler <= 0x800) {
		for (divider = 1; divider <= 0xF; divider++) {
			int x;

			x = (clk_in / (divider + 1));

			if (prescaler)
				x /= (prescaler * 2);

			if (x <= clk_ios)
				break;
		}
		if (divider < 0x10)
			break;

		if (prescaler == 0)
			prescaler = 1;
		else
			prescaler <<= 1;
	}

	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);

	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
			prescaler, divider, clk_in, clk_ios);
}
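
/*
 * Worked example with assumed figures (not taken from any data sheet): for
 * clk_in = 66 MHz and a requested clk_ios of 25 MHz, the search above stops
 * at prescaler = 0 and divider = 2, i.e. 66 MHz / 3 = 22 MHz, the highest
 * rate that does not exceed the request, and (0 << 4) | 2 is written to
 * MMC_REG_CLK_RATE.
 */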
static int mxcmci_setup_dma(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	struct dma_slave_config *config = &host->dma_slave_config;

	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
	config->dst_addr_width = 4;
	config->src_addr_width = 4;
	config->dst_maxburst = host->burstlen;
	config->src_maxburst = host->burstlen;
	config->device_fc = false;

	return dmaengine_slave_config(host->dma, config);
}
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	int burstlen, ret;

	/*
	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		burstlen = 16;
	else
		burstlen = 4;

	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
		host->burstlen = burstlen;
		ret = mxcmci_setup_dma(mmc);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"failed to config DMA channel. Falling back to PIO\n");
			dma_release_channel(host->dma);
			host->do_dma = 0;
			host->dma = NULL;
		}
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
	else
		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;
		mxcmci_set_power(host, ios->vdd);

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMD_DAT_CONT_INIT;
	}

	if (ios->clock) {
		mxcmci_set_clk_rate(host, ios->clock);
		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
	} else {
		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
	}

	host->clock = ios->clock;
}
static irqreturn_t mxcmci_detect_irq(int irq, void *data)
{
	struct mmc_host *mmc = data;

	dev_dbg(mmc_dev(mmc), "%s\n", __func__);

	mmc_detect_change(mmc, msecs_to_jiffies(250));
	return IRQ_HANDLED;
}
static int mxcmci_get_ro(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * If board doesn't support read only detection (no mmc_gpio
	 * context or gpio is invalid), then let the mmc core decide
	 * whether to accept or reject the card
	 */
	return mmc_gpio_get_ro(mmc);
}
static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 int_cntr;

	spin_lock_irqsave(&host->lock, flags);
	host->use_sdio = enable;
	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);

	if (enable)
		int_cntr |= INT_SDIO_IRQ_EN;
	else
		int_cntr &= ~INT_SDIO_IRQ_EN;

	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mxcmci_host *mxcmci = mmc_priv(host);

	/*
	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
	 * multi-block transfers when connected SDIO peripheral doesn't
	 * drive the BUSY line as required by the specs.
	 * One way to prevent this is to only allow 1-bit transfers.
	 */
	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
	else
		host->caps |= MMC_CAP_4_BIT_DATA;
}
static bool filter(struct dma_chan *chan, void *param)
{
	struct mxcmci_host *host = param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = &host->dma_data;

	return true;
}
static void mxcmci_watchdog(struct timer_list *t)
{
	struct mxcmci_host *host = from_timer(host, t, watchdog);
	struct mmc_request *req = host->req;
	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);

	if (host->dma_dir == DMA_FROM_DEVICE) {
		dmaengine_terminate_all(host->dma);
		dev_err(mmc_dev(host->mmc),
			"%s: read time out (status = 0x%08x)\n",
			__func__, stat);
	} else {
		dev_err(mmc_dev(host->mmc),
			"%s: write time out (status = 0x%08x)\n",
			__func__, stat);
		mxcmci_softreset(host);
	}

	/* Mark transfer as erroneous and inform the upper layers */

	if (host->data)
		host->data->error = -ETIMEDOUT;
	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, req);
}
static const struct mmc_host_ops mxcmci_ops = {
	.request		= mxcmci_request,
	.set_ios		= mxcmci_set_ios,
	.get_ro			= mxcmci_get_ro,
	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
	.init_card		= mxcmci_init_card,
};
static int mxcmci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct mxcmci_host *host;
	struct resource *res;
	int ret = 0, irq;
	bool dat3_card_detect = false;
	dma_cap_mask_t mask;
	const struct of_device_id *of_id;
	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;

	pr_info("i.MX/MPC512x SDHC driver\n");

	of_id = of_match_device(mxcmci_of_match, &pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq);
		return irq;
	}

	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);

	host->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out_free;
	}

	host->phys_base = res->start;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free;
	mmc->ops = &mxcmci_ops;

	/* For devicetree parsing, the bus width is read from devicetree */
	if (pdata)
		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
	else
		mmc->caps |= MMC_CAP_SDIO_IRQ;

	/* MMC core transfer sizes tunable parameters */
	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	if (of_id) {
		const struct platform_device_id *id_entry = of_id->data;
		host->devtype = id_entry->driver_data;
	} else {
		host->devtype = pdev->id_entry->driver_data;
	}

	/* adjust max_segs after devtype detection */
	if (!is_mpc512x_mmc(host))
		mmc->max_segs = 64;

	host->mmc = mmc;
	host->pdata = pdata;
	spin_lock_init(&host->lock);

	if (pdata)
		dat3_card_detect = pdata->dat3_card_detect;
	else if (mmc_card_is_removable(mmc)
			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
		dat3_card_detect = true;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto out_free;

	if (!mmc->ocr_avail) {
		if (pdata && pdata->ocr_avail)
			mmc->ocr_avail = pdata->ocr_avail;
		else
			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	}

	if (dat3_card_detect)
		host->default_irq_mask =
			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
	else
		host->default_irq_mask = 0;

	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(host->clk_ipg)) {
		ret = PTR_ERR(host->clk_ipg);
		goto out_free;
	}

	host->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(host->clk_per)) {
		ret = PTR_ERR(host->clk_per);
		goto out_free;
	}

	ret = clk_prepare_enable(host->clk_per);
	if (ret)
		goto out_free;

	ret = clk_prepare_enable(host->clk_ipg);
	if (ret)
		goto out_clk_per_put;

	mxcmci_softreset(host);

	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
	if (host->rev_no != 0x400) {
		ret = -ENODEV;
		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
			host->rev_no);
		goto out_clk_put;
	}

	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
	mmc->f_max = clk_get_rate(host->clk_per) >> 1;

	/* recommended in data sheet */
	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);

	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);

	if (!host->pdata) {
		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
	} else {
		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (res) {
			host->dmareq = res->start;
			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
			host->dma_data.priority = DMA_PRIO_LOW;
			host->dma_data.dma_request = host->dmareq;
			dma_cap_zero(mask);
			dma_cap_set(DMA_SLAVE, mask);
			host->dma = dma_request_channel(mask, filter, host);
		}
	}
	if (host->dma)
		mmc->max_seg_size = dma_get_max_seg_size(
				host->dma->device->dev);
	else
		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");

	INIT_WORK(&host->datawork, mxcmci_datawork);

	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	platform_set_drvdata(pdev, mmc);

	if (host->pdata && host->pdata->init) {
		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
				host->mmc);
		if (ret)
			goto out_free_dma;
	}

	timer_setup(&host->watchdog, mxcmci_watchdog, 0);

	mmc_add_host(mmc);

	return 0;

out_free_dma:
	if (host->dma)
		dma_release_channel(host->dma);

out_clk_put:
	clk_disable_unprepare(host->clk_ipg);
out_clk_per_put:
	clk_disable_unprepare(host->clk_per);

out_free:
	mmc_free_host(mmc);

	return ret;
}
static int mxcmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxcmci_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);

	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&pdev->dev, mmc);

	if (host->dma)
		dma_release_channel(host->dma);

	clk_disable_unprepare(host->clk_per);
	clk_disable_unprepare(host->clk_ipg);

	mmc_free_host(mmc);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mxcmci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);

	clk_disable_unprepare(host->clk_per);
	clk_disable_unprepare(host->clk_ipg);
	return 0;
}

static int mxcmci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret;

	ret = clk_prepare_enable(host->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(host->clk_ipg);
	if (ret)
		clk_disable_unprepare(host->clk_per);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
	.probe		= mxcmci_probe,
	.remove		= mxcmci_remove,
	.id_table	= mxcmci_devtype,
	.driver		= {
		.name		= DRIVER_NAME,
		.pm		= &mxcmci_pm_ops,
		.of_match_table	= mxcmci_of_match,
	}
};

module_platform_driver(mxcmci_driver);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxc-mmc");