/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...) \
        pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
static unsigned int fmax = 515633;
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        if (mrq->data)
                mrq->data->bytes_xfered = host->data_xfered;

        /*
         * Need to drop the host lock here; mmc_request_done may call
         * back into the driver...
         */
        spin_unlock(&host->lock);
        mmc_request_done(host->mmc, mrq);
        spin_lock(&host->lock);
}
static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        writel(0, host->base + MMCIMASK1);
        host->data = NULL;
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        DBG(host, "blksz %04x blks %04x flags %08x\n",
            data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz;
        host->data_xfered = 0;

        mmci_init_sg(host, data);
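        /*
         * The data timeout is programmed in card clock cycles: convert the
         * card's timeout_ns to cycles at the current bus clock (cclk) and
         * add its fixed timeout_clks component.  As an illustrative example
         * (not taken from any particular card), 100 ms at a 20 MHz cclk
         * works out to 2,000,000 clock ticks.
         */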
        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);

        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);
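        /*
         * The data path state machine encodes the block size as log2(blksz)
         * in bits [7:4] of MMCIDATACTRL, which is why ffs() - 1 is used
         * below and why only power-of-two block sizes are accepted.
         */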
        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
        if (data->flags & MMC_DATA_READ) {
                datactrl |= MCI_DPSM_DIRECTION;
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than a FIFOSIZE of bytes to transfer,
                 * trigger a PIO interrupt as soon as any data is available.
                 */
                if (host->size < MCI_FIFOSIZE)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        writel(irqmask, base + MMCIMASK1);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        DBG(host, "op %02x arg %08x flags %08x\n",
            cmd->opcode, cmd->arg, cmd->flags);
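        /*
         * If the command path state machine is still enabled from a
         * previous command, it must be disabled and given a moment to
         * settle before a new opcode and argument are programmed.
         */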
        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
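        /*
         * data_xfered is advanced a block at a time on MCI_DATABLOCKEND,
         * so bytes_xfered as reported at request completion only counts
         * blocks the controller actually finished.
         */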
        if (status & MCI_DATABLOCKEND) {
                host->data_xfered += data->blksz;
        }
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                if (status & MCI_DATACRCFAIL)
                        data->error = -EILSEQ;
                else if (status & MCI_DATATIMEOUT)
                        data->error = -ETIMEDOUT;
                else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
                        data->error = -EIO;
                status |= MCI_DATAEND;

                /*
                 * We hit an error condition.  Ensure that any data
                 * partially written to a page is properly coherent.
                 */
                if (host->sg_len && data->flags & MMC_DATA_READ)
                        flush_dcache_page(sg_page(host->sg_ptr));
        }
        if (status & MCI_DATAEND) {
                mmci_stop_data(host);

                if (!data->stop) {
                        mmci_request_end(host, data->mrq);
                } else {
                        mmci_start_command(host, data->stop, 0);
                }
        }
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;
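        /*
         * Latch all four response registers; a short (48-bit) response
         * only uses resp[0], while a 136-bit response fills all four.
         */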
        cmd->resp[0] = readl(base + MMCIRESPONSE0);
        cmd->resp[1] = readl(base + MMCIRESPONSE1);
        cmd->resp[2] = readl(base + MMCIRESPONSE2);
        cmd->resp[3] = readl(base + MMCIRESPONSE3);

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        }

        if (!cmd->data || cmd->error) {
                if (host->data)
                        mmci_stop_data(host);
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;
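        /*
         * MMCIFIFOCNT holds the number of words of this transfer that are
         * still outstanding, so the bytes already sitting in the FIFO are
         * the remaining transfer size minus (FIFOCNT << 2).
         */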
        do {
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                readsl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
                count = min(remain, maxcnt);

                writesl(base + MMCIFIFO, ptr, count >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        void __iomem *base = host->base;
        u32 status;

        status = readl(base + MMCISTATUS);

        DBG(host, "irq1 %08x\n", status);
        do {
                unsigned long flags;
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                /*
                 * Map the current scatter buffer.
                 */
                buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
                remain = host->sg_ptr->length - host->sg_off;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                /*
                 * Unmap the buffer.
                 */
                mmci_kunmap_atomic(host, buffer, &flags);
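                /*
                 * Account for the bytes just moved: advance the offset into
                 * the current scatterlist entry and shrink the outstanding
                 * transfer size.  If this entry is not yet finished, the
                 * FIFO ran dry (or filled up) first, so wait for the next
                 * IRQ.
                 */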
                host->sg_off += len;
                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                /*
                 * If we were reading, and we have completed this
                 * page, ensure that the data cache is coherent.
                 */
                if (status & MCI_RXACTIVE)
                        flush_dcache_page(sg_page(host->sg_ptr));

                if (!mmci_next_sg(host))
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);
        /*
         * If we're nearing the end of the read, switch to
         * "any data available" mode.
         */
        if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
                writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                writel(0, base + MMCIMASK1);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);
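        /*
         * Loop, reading the masked status, clearing what we saw and
         * dispatching it to the data/command handlers, until no enabled
         * status bits remain; events raised while an earlier one was being
         * handled are therefore not lost.
         */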
        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);
                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                DBG(host, "irq0 %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
                              MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);

        WARN_ON(host->mrq != NULL);

        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
                printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
                        mmc_hostname(mmc), mrq->data->blksz);
                mrq->cmd->error = -EINVAL;
                mmc_request_done(mmc, mrq);
                return;
        }

        spin_lock_irq(&host->lock);

        host->mrq = mrq;
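        /*
         * For reads the data path is programmed before the command is
         * issued, so the DPSM is ready when the card starts sending; for
         * writes it is started from the command-done interrupt instead
         * (see mmci_cmd_irq).
         */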
        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irq(&host->lock);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        u32 clk = 0, pwr = 0;

        if (ios->clock) {
                if (ios->clock >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        host->cclk = host->mclk;
                } else {
                        clk = host->mclk / (2 * ios->clock) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }
                clk |= MCI_CLK_ENABLE;
        }
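        /*
         * The divided bus clock is mclk / (2 * (clk + 1)), so the divider
         * for a requested rate is clk = mclk / (2 * rate) - 1.  As an
         * illustrative example (assuming a 24 MHz mclk, not a value taken
         * from any particular board), a 400 kHz request gives clk = 29 and
         * an exact 400 kHz bus clock; requests at or above mclk use the
         * bypass bit instead.
         */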
        if (host->plat->translate_vdd)
                pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                break;
        case MMC_POWER_UP:
                pwr |= MCI_PWR_UP;
                break;
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                pwr |= MCI_ROD;

        writel(clk, host->base + MMCICLOCK);

        if (host->pwr != pwr) {
                host->pwr = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}
static const struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .set_ios        = mmci_set_ios,
};
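/*
 * Poll the board's card-detect status once a second and tell the core
 * when it changes; no card-detect interrupt is wired up here.
 */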
static void mmci_check_status(unsigned long data)
{
        struct mmci_host *host = (struct mmci_host *)data;
        unsigned int status;

        status = host->plat->status(mmc_dev(host->mmc));
        if (status ^ host->oldstat)
                mmc_detect_change(host->mmc, 0);

        host->oldstat = status;
        mod_timer(&host->timer, jiffies + HZ);
}
static int mmci_probe(struct amba_device *dev, void *id)
{
        struct mmc_platform_data *plat = dev->dev.platform_data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* must have platform data */
        if (!plat) {
                ret = -EINVAL;
                goto out;
        }

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->clk = clk_get(&dev->dev, "MCLK");
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_enable(host->clk);
        if (ret)
                goto clk_free;
        host->plat = plat;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this,
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
        }
        host->mmc = mmc;
        host->base = ioremap(dev->res.start, SZ_4K);
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }
        mmc->ops = &mmci_ops;
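        /*
         * The minimum bus clock corresponds to the largest divider value
         * (255), i.e. mclk / (2 * 256) = mclk / 512, rounded up; the
         * maximum is capped at the module parameter "fmax".
         */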
        mmc->f_min = (host->mclk + 511) / 512;
        mmc->f_max = min(host->mclk, fmax);
        mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = MMC_CAP_MULTIWRITE;

        mmc->max_hw_segs = 16;
        mmc->max_phys_segs = NR_SG;

        /*
         * Since we only have a 16-bit data length register, we must
         * ensure that we don't exceed 2^16-1 bytes in a single request.
         */
        mmc->max_req_size = 65535;

        /*
         * Set the maximum segment size.  Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 2048;

        /*
         * No limit on the number of blocks transferred.
         */
        mmc->max_blk_count = mmc->max_req_size;
        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
        if (ret)
                goto irq0_free;

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);
        amba_set_drvdata(dev, mmc);

        mmc_add_host(mmc);

        printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
                mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
                (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

        init_timer(&host->timer);
        host->timer.data = (unsigned long)host;
        host->timer.function = mmci_check_status;
        host->timer.expires = jiffies + HZ;
        add_timer(&host->timer);

        return 0;
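        /*
         * Error unwinding below releases resources in the reverse order of
         * acquisition; each failure point above jumps to the label that
         * frees everything obtained so far.
         */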
 irq0_free:
        free_irq(dev->irq[0], host);
 unmap:
        iounmap(host->base);
 clk_disable:
        clk_disable(host->clk);
 clk_free:
        clk_put(host->clk);
 host_free:
        mmc_free_host(mmc);
 rel_regions:
        amba_release_regions(dev);
 out:
        return ret;
}
static int mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                del_timer_sync(&host->timer);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                free_irq(dev->irq[0], host);
                free_irq(dev->irq[1], host);

                iounmap(host->base);
                clk_disable(host->clk);
                clk_put(host->clk);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}
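/*
 * Suspend masks the controller interrupts once the core has suspended
 * the card; resume re-enables them before handing the card back to the
 * core.
 */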
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc, state);
                if (ret == 0)
                        writel(0, host->base + MMCIMASK0);
        }

        return ret;
}

static int mmci_resume(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#else
#define mmci_suspend    NULL
#define mmci_resume     NULL
#endif
static struct amba_id mmci_ids[] = {
        {
                .id     = 0x00041180,
                .mask   = 0x000fffff,
        },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
        },
        { 0, 0 },
};

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
        },
        .probe          = mmci_probe,
        .remove         = mmci_remove,
        .suspend        = mmci_suspend,
        .resume         = mmci_resume,
        .id_table       = mmci_ids,
};
static int __init mmci_init(void)
{
        return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
        amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");