/*
 *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *
 *  1 and 3 byte data transfers not supported
 *  max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/sizes.h>

#include <mach/hardware.h>
#include <mach/pxa-regs.h>
#include <mach/mmc.h>

#include "pxamci.h"
#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG	1
#define CLKRT_OFF	(~0)
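
/*
 * CLKRT_OFF is a sentinel for host->clkrt meaning the controller clock
 * is currently gated off (clk_disable()d); a real CLKRT divider value
 * is written once pxamci_set_ios() selects a bus clock.
 */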
struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	struct clk		*clk;
	unsigned long		clkrate;
	int			irq;
	int			dma;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;
	struct pxa_dma_desc	*sg_cpu;
	unsigned int		dma_len;

	unsigned int		dma_dir;
	unsigned int		dma_drcmrrx;
	unsigned int		dma_drcmrtx;
};
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/*
		 * A write descriptor that is not a multiple of 32 bytes
		 * needs the descriptor-done interrupt so the partially
		 * filled transmit FIFO can be flushed; see pxamci_dma_irq().
		 */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}
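
/*
 * There is no explicit "go" bit for commands: the opcode, the argument
 * (split across the 16-bit ARGH/ARGL registers), CMDAT and CLKRT are
 * programmed while the bus clock is stopped, and the command is issued
 * once START_CLOCK restarts the clock.
 */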
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			DCSR(host->dma) = DCSR_RUN;
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
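
/*
 * Called on DATA_TRAN_DONE (and from the DMA error path): unmap the
 * scatterlist, record success or failure for the whole transfer, then
 * either issue the STOP command or complete the request.
 */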
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	/* only look at interrupt sources that are not masked off */
	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}
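
/*
 * Entry point from the MMC core: stop the bus clock, set up the data
 * phase (if any), then issue the command.  Completion is interrupt
 * driven via pxamci_irq().
 */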
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		if (host->clkrt == CLKRT_OFF)
			clk_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz on pxa300/pxa310 */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			/* CLKRT selects a power-of-two divider of clkrate */
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
	struct pxamci_host *pxa_host = mmc_priv(host);

	if (enable)
		pxamci_enable_irq(pxa_host, SDIO_INT);
	else
		pxamci_disable_irq(pxa_host, SDIO_INT);
}
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
static void pxamci_dma_irq(int dma, void *devid)
{
	struct pxamci_host *host = devid;
	int dcsr = DCSR(dma);
	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;

	if (dcsr & DCSR_ENDINTR) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
		       mmc_hostname(host->mmc), dma, dcsr);
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, host->pdata->detect_delay);
	return IRQ_HANDLED;
}
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r, *dmarx, *dmatx;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
							  : host->clkrate;

	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (cpu_is_pxa300() || cpu_is_pxa310())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmarx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrrx = dmarx->start;

	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmatx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrtx = dmatx->start;

	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
		if (host->clk)
			clk_put(host->clk);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
#ifdef CONFIG_PM
static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define pxamci_suspend	NULL
#define pxamci_resume	NULL
#endif
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");