/*
 *  linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
 *
 *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
 *
 *  derived from pxamci.c by Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/sizes.h>
#include <mach/mmc.h>
#include <mach/imx-dma.h>

#include "imxmmc.h"
#define DRIVER_NAME "imx-mmc"

#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
				 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
				 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
struct imxmci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	int			irq;
	int			dma;
	volatile unsigned int	imask;
	unsigned int		power_mode;
	unsigned int		present;
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	struct timer_list	timer;
	struct tasklet_struct	tasklet;
	unsigned int		status_reg;
	unsigned long		pending_events;
	/* The next two fields are used by CPU driven transfers to overcome SDHC deficiencies */
	u16			*data_ptr;
	unsigned int		data_cnt;
	atomic_t		stuck_timeout;

	unsigned int		dma_nents;
	unsigned int		dma_size;
	unsigned int		dma_dir;
	int			dma_allocated;

	unsigned char		actual_bus_width;

	int			prev_cmd_code;

	struct clk		*clk;
};
#define IMXMCI_PEND_IRQ_b	0
#define IMXMCI_PEND_DMA_END_b	1
#define IMXMCI_PEND_DMA_ERR_b	2
#define IMXMCI_PEND_WAIT_RESP_b	3
#define IMXMCI_PEND_DMA_DATA_b	4
#define IMXMCI_PEND_CPU_DATA_b	5
#define IMXMCI_PEND_CARD_XCHG_b	6
#define IMXMCI_PEND_SET_INIT_b	7
#define IMXMCI_PEND_STARTED_b	8

#define IMXMCI_PEND_IRQ_m	(1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m	(1 << IMXMCI_PEND_DMA_END_b)
#define IMXMCI_PEND_DMA_ERR_m	(1 << IMXMCI_PEND_DMA_ERR_b)
#define IMXMCI_PEND_WAIT_RESP_m	(1 << IMXMCI_PEND_WAIT_RESP_b)
#define IMXMCI_PEND_DMA_DATA_m	(1 << IMXMCI_PEND_DMA_DATA_b)
#define IMXMCI_PEND_CPU_DATA_m	(1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m	(1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m	(1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m	(1 << IMXMCI_PEND_STARTED_b)
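/*
 * imxmci_stop_clock - request the SDHC to gate the card clock.
 *
 * Stopping is only a request to the controller; completion is signalled by
 * STATUS_CARD_BUS_CLK_RUN going low, so the status register is polled (and
 * sampled twice before the clock is considered really stopped).
 */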
static void imxmci_stop_clock(struct imxmci_host *host)
{
	int i = 0;
	u16 reg;

	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
	while (i < 0x1000) {
		if (!(i & 0x7f)) {
			reg = readw(host->base + MMC_REG_STR_STP_CLK);
			writew(reg | STR_STP_CLK_STOP_CLK,
			       host->base + MMC_REG_STR_STP_CLK);
		}

		reg = readw(host->base + MMC_REG_STATUS);
		if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
			/* Check twice before cut */
			reg = readw(host->base + MMC_REG_STATUS);
			if (!(reg & STATUS_CARD_BUS_CLK_RUN))
				return;
		}

		i++;
	}
	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
}
static int imxmci_start_clock(struct imxmci_host *host)
{
	unsigned int trials = 0;
	unsigned int delay_limit = 128;
	unsigned long flags;
	u16 reg;

	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);

	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

	/*
	 * Command a start of the clock. This usually succeeds in fewer
	 * than 6 delay loops, but during card detection (low clock rate)
	 * it takes up to 5000 delay loops and sometimes fails on the first try.
	 */
	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);

	do {
		unsigned int delay = delay_limit;

		while (delay--) {
			reg = readw(host->base + MMC_REG_STATUS);
			if (reg & STATUS_CARD_BUS_CLK_RUN) {
				/* Check twice before cut */
				reg = readw(host->base + MMC_REG_STATUS);
				if (reg & STATUS_CARD_BUS_CLK_RUN)
					return 0;
			}

			if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
				return 0;
		}

		local_irq_save(flags);
		/*
		 * Ensure that the request is not doubled under any circumstances.
		 * It is possible that the clock-running state is missed, because some
		 * other IRQ or a scheduling delay postpones this function while the
		 * clock has already been stopped by other means (response processing,
		 * SDHC hardware).
		 */
		if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
			reg = readw(host->base + MMC_REG_STR_STP_CLK);
			writew(reg | STR_STP_CLK_START_CLK,
			       host->base + MMC_REG_STR_STP_CLK);
		}
		local_irq_restore(flags);

	} while (++trials < 256);

	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

	return -1;
}
static void imxmci_softreset(struct imxmci_host *host)
{
	int i;

	/* reset sequence */
	writew(0x08, host->base + MMC_REG_STR_STP_CLK);
	writew(0x0D, host->base + MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		writew(0x05, host->base + MMC_REG_STR_STP_CLK);

	writew(0xff, host->base + MMC_REG_RES_TO);
	writew(512, host->base + MMC_REG_BLK_LEN);
	writew(1, host->base + MMC_REG_NOB);
}
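/*
 * imxmci_busy_wait_for_status - poll STATUS until one of the bits in
 * stat_mask is set or the timeout (roughly in microseconds) expires.
 *
 * Accumulates status bits into *pstat. Returns a negative value on timeout,
 * 0 if no waiting was needed, otherwise the time spent waiting.
 */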
static int imxmci_busy_wait_for_status(struct imxmci_host *host,
				       unsigned int *pstat, unsigned int stat_mask,
				       int timeout, const char *where)
{
	int loops = 0;

	while (!(*pstat & stat_mask)) {
		loops += 2;
		if (loops >= timeout) {
			dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
				where, *pstat, stat_mask);
			return -1;
		}
		udelay(2);
		*pstat |= readw(host->base + MMC_REG_STATUS);
	}
	if (!loops)
		return 0;

	/* The busy-wait is expected here for clock <8MHz due to SDHC hardware flaws */
	if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
		dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
			 loops, where, *pstat, stat_mask);

	return loops;
}
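/*
 * imxmci_setup_data - program block count/length and prepare the transfer.
 *
 * Transfers shorter than 512 bytes (e.g. the SCR read) are done by the CPU
 * through the FIFO; everything else is mapped for scatter-gather DMA. The
 * DMA engine is started immediately for reads, and only after the command
 * response for writes.
 */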
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	writew(nob, host->base + MMC_REG_NOB);
	writew(blksz, host->base + MMC_REG_BLK_LEN);

	/*
	 * DMA cannot be used for small block sizes; we have to use CPU driven transfers otherwise.
	 * We are in big trouble for non-512-byte transfers according to the note in paragraph
	 * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least.
	 * The situation is even more complex in reality. The SDHC is not able to handle
	 * partial FIFO fills and reads well. The length has to be rounded up to a multiple of
	 * the burst size. This is required for the SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			writew(1, host->base + MMC_REG_NOB);
			writew(512, host->base + MMC_REG_BLK_LEN);
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16 *)sg_virt(data->sg);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_REG_BUFFER_ACCESS,
				 DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_REG_BUFFER_ACCESS,
				 DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in the future */
	host->dma_size = 0;
	for (i = 0; i < host->dma_nents; i++)
		host->dma_size += data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE)
		imx_dma_enable(host->dma);
}
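/*
 * imxmci_start_cmd - program the command registers, set up the interrupt
 * mask for the expected completion events and restart the card clock.
 */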
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	unsigned long flags;
	u32 imask;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* Ensure that the clock is stopped, otherwise command programming and start fail */
	imxmci_stop_clock(host);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMD_DAT_CONT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
		break;
	default:
		break;
	}

	if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

	if (host->actual_bus_width == MMC_BUS_WIDTH_4)
		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

	writew(cmd->opcode, host->base + MMC_REG_CMD);
	writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
	writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);

	atomic_set(&host->stuck_timeout, 0);
	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);

	imask = IMXMCI_INT_MASK_DEFAULT;
	imask &= ~INT_MASK_END_CMD_RES;
	if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
		/* imask &= ~INT_MASK_BUF_READY; */
		imask &= ~INT_MASK_DATA_TRAN;
		if (cmdat & CMD_DAT_CONT_WRITE)
			imask &= ~INT_MASK_WRITE_OP_DONE;
		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			imask &= ~INT_MASK_BUF_READY;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->imask = imask;
	writew(host->imask, host->base + MMC_REG_INT_MASK);
	spin_unlock_irqrestore(&host->lock, flags);

	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
		cmd->opcode, cmd->opcode, imask);

	imxmci_start_clock(host);
}
static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
				  IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);

	host->imask = IMXMCI_INT_MASK_DEFAULT;
	writew(host->imask, host->base + MMC_REG_INT_MASK);

	spin_unlock_irqrestore(&host->lock, flags);

	if (req && req->cmd)
		host->prev_cmd_code = req->cmd->opcode;

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, req);
}
static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
		imx_dma_disable(host->dma);
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
			     host->dma_dir);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
		if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
			data->error = -EILSEQ;
		else if (stat & STATUS_TIME_OUT_READ)
			data->error = -ETIMEDOUT;
		else
			data->error = -EIO;
	} else {
		data->bytes_xfered = host->dma_size;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
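/*
 * imxmci_cmd_done - read the command response from the response FIFO and
 * either kick off the (delayed) DMA write, or finish the request on an
 * error or when there is no data phase.
 */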
static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;
	struct mmc_data *data = host->data;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = readw(host->base + MMC_REG_RES_FIFO);
				b = readw(host->base + MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = readw(host->base + MMC_REG_RES_FIFO);
			b = readw(host->base + MMC_REG_RES_FIFO);
			c = readw(host->base + MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}

	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);

	if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
		if (host->req->data->flags & MMC_DATA_WRITE) {

			/* Wait for FIFO to be empty before starting DMA write */

			stat = readw(host->base + MMC_REG_STATUS);
			if (imxmci_busy_wait_for_status(host, &stat,
							STATUS_APPL_BUFF_FE,
							40, "imxmci_cmd_done DMA WR") < 0) {
				cmd->error = -EIO;
				imxmci_finish_data(host, stat);
				imxmci_finish_request(host, host->req);
				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
					 stat);
				return 0;
			}

			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				imx_dma_enable(host->dma);
		}
	} else {
		struct mmc_request *req;
		imxmci_stop_clock(host);
		req = host->req;

		if (data)
			imxmci_finish_data(host, stat);

		if (req)
			imxmci_finish_request(host, req);
		else
			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
	}

	return 1;
}
static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (!data)
		return 0;

	data_error = imxmci_finish_data(host, stat);

	if (host->req->stop) {
		imxmci_stop_clock(host);
		imxmci_start_cmd(host, host->req->stop, 0);
	} else {
		struct mmc_request *req;
		req = host->req;
		if (req)
			imxmci_finish_request(host, req);
		else
			dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
	}

	return 1;
}
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
	int i;
	int burst_len;
	int trans_done = 0;
	unsigned int stat = *pstat;

	if (host->actual_bus_width != MMC_BUS_WIDTH_4)
		burst_len = 16;
	else
		burst_len = 64;

	/* This is unfortunately required */
	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
		stat);

	udelay(20);	/* required for clocks < 8MHz */

	if (host->dma_dir == DMA_FROM_DEVICE) {
		imxmci_busy_wait_for_status(host, &stat,
					    STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
					    STATUS_TIME_OUT_READ,
					    50, "imxmci_cpu_driven_data read");

		while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
		       !(stat & STATUS_TIME_OUT_READ) &&
		       (host->data_cnt < 512)) {

			udelay(20);	/* required for clocks < 8MHz */

			for (i = burst_len; i >= 2 ; i -= 2) {
				u16 data;
				data = readw(host->base + MMC_REG_BUFFER_ACCESS);
				udelay(10);	/* required for clocks < 8MHz */
				if (host->data_cnt+2 <= host->dma_size) {
					*(host->data_ptr++) = data;
				} else {
					if (host->data_cnt < host->dma_size)
						*(u8 *)(host->data_ptr) = data;
				}
				host->data_cnt += 2;
			}

			stat = readw(host->base + MMC_REG_STATUS);

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
				host->data_cnt, burst_len, stat);
		}

		if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
			trans_done = 1;

		if (host->dma_size & 0x1ff)
			stat &= ~STATUS_CRC_READ_ERR;

		if (stat & STATUS_TIME_OUT_READ) {
			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
				stat);
			trans_done = -1;
		}
	} else {
		imxmci_busy_wait_for_status(host, &stat,
					    STATUS_APPL_BUFF_FE,
					    20, "imxmci_cpu_driven_data write");

		while ((stat & STATUS_APPL_BUFF_FE) &&
		       (host->data_cnt < host->dma_size)) {
			if (burst_len >= host->dma_size - host->data_cnt) {
				burst_len = host->dma_size - host->data_cnt;
				host->data_cnt = host->dma_size;
				trans_done = 1;
			} else {
				host->data_cnt += burst_len;
			}

			for (i = burst_len; i > 0 ; i -= 2)
				writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);

			stat = readw(host->base + MMC_REG_STATUS);

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
				burst_len, stat);
		}
	}

	*pstat = stat;

	return trans_done;
}
static void imxmci_dma_irq(int dma, void *devid)
{
	struct imxmci_host *host = devid;
	u32 stat = readw(host->base + MMC_REG_STATUS);

	atomic_set(&host->stuck_timeout, 0);
	host->status_reg = stat;
	set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static irqreturn_t imxmci_irq(int irq, void *devid)
{
	struct imxmci_host *host = devid;
	u32 stat = readw(host->base + MMC_REG_STATUS);
	int handled = 1;

	writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
	       host->base + MMC_REG_INT_MASK);

	atomic_set(&host->stuck_timeout, 0);
	host->status_reg = stat;
	set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
	set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
	tasklet_schedule(&host->tasklet);

	return IRQ_RETVAL(handled);
}
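/*
 * imxmci_tasklet_fnc - bottom half shared by the SDHC interrupt, the DMA
 * completion interrupt and the status-poll timer. It re-reads and merges
 * the status register, completes command and data phases, and handles
 * card-change and stuck-hardware timeouts.
 */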
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if (atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = readw(host->base + MMC_REG_STATUS);
		host->status_reg = stat;
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc),
			"%s TIMEOUT, hardware stuck STATUS = 0x%04x IMASK = 0x%04x\n",
			what, stat,
			readw(host->base + MMC_REG_INT_MASK));
		dev_err(mmc_dev(host->mmc),
			"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
			readw(host->base + MMC_REG_CMD_DAT_CONT),
			readw(host->base + MMC_REG_BLK_LEN),
			readw(host->base + MMC_REG_NOB),
			CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
			host->cmd ? host->cmd->opcode : 0,
			host->prev_cmd_code,
			1 << host->actual_bus_width, host->dma_size);
	}

	if (!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
			STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = readw(host->base + MMC_REG_STATUS);
		/*
		 * This is not required in theory, but there is a chance to miss some
		 * flag which is cleared automatically by the mask write. The original
		 * Freescale code keeps the status from IRQ time, so we do the same.
		 */
		stat |= host->status_reg;

		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			stat &= ~STATUS_CRC_READ_ERR;

		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
						    STATUS_END_CMD_RESP | STATUS_ERR_MASK,
						    20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if (host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= readw(host->base + MMC_REG_STATUS);
			if (imxmci_cpu_driven_data(host, &stat)) {
				if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m | IMXMCI_PEND_CPU_DATA_m,
						  &host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	    !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = readw(host->base + MMC_REG_STATUS);
		/* Same as above */
		stat |= host->status_reg;

		if (host->dma_dir == DMA_TO_DEVICE)
			data_dir_mask = STATUS_WRITE_OP_DONE;
		else
			data_dir_mask = STATUS_DATA_TRANS_DONE;

		if (stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		if (host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if (host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if (host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
}
static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct imxmci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->req != NULL);

	host->req = req;

	cmdat = 0;

	if (req->data) {
		imxmci_setup_data(host, req->data);

		cmdat |= CMD_DAT_CONT_DATA_ENABLE;

		if (req->data->flags & MMC_DATA_WRITE)
			cmdat |= CMD_DAT_CONT_WRITE;

		if (req->data->flags & MMC_DATA_STREAM)
			cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
	}

	imxmci_start_cmd(host, req->cmd, cmdat);
}
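/*
 * CLK_RATE is the SDHC input clock after the fixed PERCLK2 prescaler
 * (96 MHz / 5 = 19.2 MHz); imxmci_set_ios() derives the prescaler and
 * divider for the requested card clock from it.
 */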
#define CLK_RATE 19200000
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
		BLR(host->dma) = 0;	/* burst 64 byte read/write */
	} else {
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
		BLR(host->dma) = 16;	/* burst 16 byte read/write */
	}

	if (host->power_mode != ios->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			break;
		case MMC_POWER_UP:
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
			break;
		case MMC_POWER_ON:
			break;
		}
		host->power_mode = ios->power_mode;
	}

	if (ios->clock) {
		unsigned int clk;
		u16 reg;

		/* The prescaler is 5 for PERCLK2 equal to 96 MHz,
		 * i.e. 96 MHz / 5 = 19.2 MHz
		 */
		clk = clk_get_rate(host->clk);
		prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
		switch (prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		for (clk = 0; clk < 8; clk++) {
			int x;
			x = CLK_RATE / (1 << clk);
			if (x <= ios->clock)
				break;
		}

		/* enable controller */
		reg = readw(host->base + MMC_REG_STR_STP_CLK);
		writew(reg | STR_STP_CLK_ENABLE,
		       host->base + MMC_REG_STR_STP_CLK);

		imxmci_stop_clock(host);
		writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
		/*
		 * As far as I understand, the clock should not be started here, because
		 * it would trigger the SDHC sequencer and send the last or a random
		 * command to the card.
		 */
		/* imxmci_start_clock(host); */

		dev_dbg(mmc_dev(host->mmc),
			"MMC_CLK_RATE: 0x%08x\n",
			readw(host->base + MMC_REG_CLK_RATE));
	} else {
		imxmci_stop_clock(host);
	}
}
static int imxmci_get_ro(struct mmc_host *mmc)
{
	struct imxmci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}
static const struct mmc_host_ops imxmci_ops = {
	.request	= imxmci_request,
	.set_ios	= imxmci_set_ios,
	.get_ro		= imxmci_get_ro,
};
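/*
 * imxmci_check_status - timer callback run every half second. It polls the
 * platform card-detect hook and also drives the stuck_timeout counter that
 * lets the tasklet recover from a hung transfer.
 */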
static void imxmci_check_status(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;

	if (host->pdata && host->pdata->card_present &&
	    host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
		host->present ^= 1;
		dev_info(mmc_dev(host->mmc), "card %s\n",
			 host->present ? "inserted" : "removed");

		set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
	    test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
		atomic_inc(&host->stuck_timeout);
		if (atomic_read(&host->stuck_timeout) > 4)
			tasklet_schedule(&host->tasklet);
	} else {
		atomic_set(&host->stuck_timeout, 0);
	}

	mod_timer(&host->timer, jiffies + (HZ >> 1));
}
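/*
 * imxmci_probe - map the controller, check its revision (0x390 expected),
 * set up DMA, the SDHC interrupt and the status-poll timer, then register
 * the host with the MMC core.
 */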
static int __init imxmci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct imxmci_host *host = NULL;
	struct resource *r;
	int ret = 0, irq;
	u16 rev_no;

	pr_info("i.MX mmc driver\n");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &imxmci_ops;
	mmc->f_min = 150000;
	mmc->f_max = CLK_RATE/2;
	mmc->ocr_avail = MMC_VDD_32_33;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	/* MMC core transfer sizes tunable parameters */
	mmc->max_segs = 64;
	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */
	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */
	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->base = ioremap(r->start, resource_size(r));
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	host->mmc = mmc;
	host->dma_allocated = 0;
	host->pdata = pdev->dev.platform_data;
	if (!host->pdata)
		dev_warn(&pdev->dev, "No platform data provided!\n");

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;

	host->clk = clk_get(&pdev->dev, "perclk2");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out;
	}
	clk_enable(host->clk);

	imx_gpio_mode(PB8_PF_SD_DAT0);
	imx_gpio_mode(PB9_PF_SD_DAT1);
	imx_gpio_mode(PB10_PF_SD_DAT2);
	/* Configured as GPIO with pull-up to ensure right MMC card mode */
	/* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
	imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
	/* imx_gpio_mode(PB11_PF_SD_DAT3); */
	imx_gpio_mode(PB12_PF_SD_CLK);
	imx_gpio_mode(PB13_PF_SD_CMD);

	imxmci_softreset(host);

	rev_no = readw(host->base + MMC_REG_REV_NO);
	if (rev_no != 0x390) {
		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
			readw(host->base + MMC_REG_REV_NO));
		ret = -ENODEV;
		goto out;
	}

	/* recommended in data sheet */
	writew(0x2db4, host->base + MMC_REG_READ_TO);

	host->imask = IMXMCI_INT_MASK_DEFAULT;
	writew(host->imask, host->base + MMC_REG_INT_MASK);

	host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
	if (host->dma < 0) {
		dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
		ret = -EBUSY;
		goto out;
	}
	host->dma_allocated = 1;
	imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
	RSSR(host->dma) = DMA_REQ_SDHC;

	tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
	host->pending_events = 0;

	ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	if (host->pdata && host->pdata->card_present)
		host->present = host->pdata->card_present(mmc_dev(mmc));
	else	/* if there is no way to detect assume that card is present */
		host->present = 1;

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = imxmci_check_status;
	add_timer(&host->timer);
	mod_timer(&host->timer, jiffies + (HZ >> 1));

	platform_set_drvdata(pdev, mmc);

	mmc_add_host(mmc);

	return 0;

out:
	if (host) {
		if (host->dma_allocated) {
			imx_dma_free(host->dma);
			host->dma_allocated = 0;
		}
		if (host->clk) {
			clk_disable(host->clk);
			clk_put(host->clk);
		}
		if (host->base)
			iounmap(host->base);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_mem_region(r->start, resource_size(r));
	return ret;
}
static int __exit imxmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct imxmci_host *host = mmc_priv(mmc);

		tasklet_disable(&host->tasklet);

		del_timer_sync(&host->timer);
		mmc_remove_host(mmc);

		free_irq(host->irq, host);
		iounmap(host->base);
		if (host->dma_allocated) {
			imx_dma_free(host->dma);
			host->dma_allocated = 0;
		}

		tasklet_kill(&host->tasklet);

		clk_disable(host->clk);
		clk_put(host->clk);

		release_mem_region(host->res->start, resource_size(host->res));

		mmc_free_host(mmc);
	}
	return 0;
}
#ifdef CONFIG_PM
static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);

	return ret;
}

static int imxmci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct imxmci_host *host;
	int ret = 0;

	if (mmc) {
		host = mmc_priv(mmc);
		if (host)
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define imxmci_suspend	NULL
#define imxmci_resume	NULL
#endif /* CONFIG_PM */
static struct platform_driver imxmci_driver = {
	.remove		= __exit_p(imxmci_remove),
	.suspend	= imxmci_suspend,
	.resume		= imxmci_resume,
	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
	}
};

static int __init imxmci_init(void)
{
	return platform_driver_probe(&imxmci_driver, imxmci_probe);
}

static void __exit imxmci_exit(void)
{
	platform_driver_unregister(&imxmci_driver);
}

module_init(imxmci_init);
module_exit(imxmci_exit);

MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-mmc");