/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
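
/*
 * Default maximum card clock in Hz; can be overridden at load time via
 * the "fmax" module parameter (see module_param() at the end of this file).
 */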
static unsigned int fmax = 515633;

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	u32 clk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == 0x80)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
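
/*
 * Finish off a request: report the number of bytes transferred and
 * complete it with the MMC core.  Called with host->lock held; the lock
 * is dropped around mmc_request_done() since the core may call back
 * into the driver.
 */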
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
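
/*
 * Shut down the data path: disable the DPSM and mask the PIO interrupts.
 */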
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}
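
/*
 * Set up a data transfer: program the timeout, data length and block
 * size, enable the DPSM and unmask the FIFO interrupts used for PIO.
 */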
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}
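
/*
 * Issue a command: load the argument register and write the opcode with
 * the CPSM enabled.  Completion is handled in mmci_cmd_irq().
 */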
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/ 0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
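
/*
 * Handle data path interrupts: account completed blocks, record any
 * error, and on data end either finish the request or send the stop
 * command.
 */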
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
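
/*
 * Handle command path interrupts: capture the response words, note
 * timeout/CRC errors, then finish the request or start the data phase
 * of a write.
 */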
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
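
/*
 * Drain the receive FIFO into the buffer; returns the number of bytes
 * copied.
 */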
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
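
/*
 * Fill the transmit FIFO from the buffer; returns the number of bytes
 * copied.
 */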
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
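
/*
 * mmc_host_ops request handler.  Read transfers are set up before the
 * command is sent; write transfers are started from mmci_cmd_irq() once
 * the command has completed.
 */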
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
		       mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
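
/*
 * mmc_host_ops set_ios handler: apply the requested power mode, bus
 * mode and clock settings under the host lock.
 */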
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		/*
		 * The translate_vdd function is not used if you have
		 * an external regulator, or your design is really weird.
		 * Using it would mean sending in power control BOTH using
		 * a regulator AND the 4 MMCIPWR bits. If we don't have
		 * a regulator, we might have some other platform specific
		 * power control behind this translate function.
		 */
		if (!host->vcc && host->plat->translate_vdd)
			pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

	return !status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
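
/*
 * Card detect polling, run once a second from a timer; kicks the core
 * if the card status changed.
 */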
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status = mmci_get_cd(host->mmc);

	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);
	host->oldstat = mmci_get_cd(host->mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 err_gpio_wp:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_cd:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{ .id = 0x00041180, .mask = 0x000fffff },
	{ .id = 0x00041181, .mask = 0x000fffff },
	/* ST Micro variants */
	{ .id = 0x00180180, .mask = 0x00ffffff },
	{ .id = 0x00280180, .mask = 0x00ffffff },
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");