/*
 * MOXA ART MMC host driver.
 *
 * Copyright (C) 2014 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * Moxa Technologies Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>

#define REG_COMMAND		0
#define REG_ARGUMENT		4
#define REG_RESPONSE0		8
#define REG_RESPONSE1		12
#define REG_RESPONSE2		16
#define REG_RESPONSE3		20
#define REG_RESPONSE_COMMAND	24
#define REG_DATA_CONTROL	28
#define REG_DATA_TIMER		32
#define REG_DATA_LENGTH		36
#define REG_STATUS		40
#define REG_CLEAR		44
#define REG_INTERRUPT_MASK	48
#define REG_POWER_CONTROL	52
#define REG_CLOCK_CONTROL	56
#define REG_BUS_WIDTH		60
#define REG_DATA_WINDOW		64
#define REG_FEATURE		68
#define REG_REVISION		72

/* REG_COMMAND */
#define CMD_SDC_RESET		BIT(10)
#define CMD_EN			BIT(9)
#define CMD_APP_CMD		BIT(8)
#define CMD_LONG_RSP		BIT(7)
#define CMD_NEED_RSP		BIT(6)
#define CMD_IDX_MASK		0x3f

/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP		BIT(6)
#define RSP_CMD_IDX_MASK	0x3f

/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET	BIT(8)
#define DCR_DATA_THRES		BIT(7)
#define DCR_DATA_EN		BIT(6)
#define DCR_DMA_EN		BIT(5)
#define DCR_DATA_WRITE		BIT(4)
#define DCR_BLK_SIZE		0x0f

/* REG_DATA_LENGTH */
#define DATA_LEN_MASK		0xffffff

/* REG_STATUS */
#define WRITE_PROT		BIT(12)
#define CARD_DETECT		BIT(11)
/* 1-10 below can be sent to either registers, interrupt or clear. */
#define CARD_CHANGE		BIT(10)
#define FIFO_ORUN		BIT(9)
#define FIFO_URUN		BIT(8)
#define DATA_END		BIT(7)
#define CMD_SENT		BIT(6)
#define DATA_CRC_OK		BIT(5)
#define RSP_CRC_OK		BIT(4)
#define DATA_TIMEOUT		BIT(3)
#define RSP_TIMEOUT		BIT(2)
#define DATA_CRC_FAIL		BIT(1)
#define RSP_CRC_FAIL		BIT(0)

#define MASK_RSP		(RSP_TIMEOUT | RSP_CRC_FAIL | \
				 RSP_CRC_OK  | CARD_DETECT  | CMD_SENT)

#define MASK_DATA		(DATA_CRC_OK   | DATA_END | \
				 DATA_CRC_FAIL | DATA_TIMEOUT)

#define MASK_INTR_PIO		(FIFO_URUN | FIFO_ORUN | CARD_CHANGE)

/* REG_POWER_CONTROL */
#define SD_POWER_ON		BIT(4)
#define SD_POWER_MASK		0x0f

/* REG_CLOCK_CONTROL */
#define CLK_HISPD		BIT(9)
#define CLK_OFF			BIT(8)
#define CLK_SD			BIT(7)
#define CLK_DIV_MASK		0x7f

/* REG_BUS_WIDTH */
#define BUS_WIDTH_8		BIT(2)
#define BUS_WIDTH_4		BIT(1)
#define BUS_WIDTH_1		BIT(0)

#define MMC_VDD_360		23
#define MIN_POWER		(MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES		500000

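/*
 * Per-controller state.  @dma_complete is signalled from the dmaengine
 * callback and @pio_complete from the PIO transfer path run in the FIFO
 * interrupt; moxart_request() waits on whichever applies.
 */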
struct moxart_host {
	spinlock_t			lock;

	void __iomem			*base;

	phys_addr_t			reg_phys;

	struct dma_chan			*dma_chan_tx;
	struct dma_chan			*dma_chan_rx;
	struct dma_async_tx_descriptor	*tx_desc;
	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct scatterlist		*cur_sg;
	struct completion		dma_complete;
	struct completion		pio_complete;

	u32				num_sg;
	u32				data_remain;
	u32				data_len;
	u32				fifo_width;
	u32				timeout;
	u32				rate;

	long				sysclk;

	bool				have_dma;
	bool				is_removed;
};

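/*
 * Scatterlist bookkeeping: cur_sg/num_sg track the segment currently being
 * transferred and data_remain the bytes still to move within it, clamped so
 * the total never exceeds data_len (blocks * blksz).
 */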
static inline void moxart_init_sg(struct moxart_host *host,
				  struct mmc_data *data)
{
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;
	host->data_remain = host->cur_sg->length;

	if (host->data_remain > host->data_len)
		host->data_remain = host->data_len;
}

static inline int moxart_next_sg(struct moxart_host *host)
{
	int remain;
	struct mmc_data *data = host->mrq->cmd->data;

	host->cur_sg++;
	host->num_sg--;

	if (host->num_sg > 0) {
		host->data_remain = host->cur_sg->length;
		remain = host->data_len - data->bytes_xfered;
		if (remain > 0 && remain < host->data_remain)
			host->data_remain = remain;
	}

	return host->num_sg;
}

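/*
 * Poll REG_STATUS until one of the bits in @mask is raised, then acknowledge
 * it by writing the same bits to REG_CLEAR.  Gives up with -ETIMEDOUT after
 * MAX_RETRIES polls.
 */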
static int moxart_wait_for_status(struct moxart_host *host,
				  u32 mask, u32 *status)
{
	int ret = -ETIMEDOUT;
	u32 i;

	for (i = 0; i < MAX_RETRIES; i++) {
		*status = readl(host->base + REG_STATUS);
		if (!(*status & mask)) {
			udelay(5);
		} else {
			writel(*status & mask, host->base + REG_CLEAR);
			ret = 0;
			break;
		}
	}

	if (ret == -ETIMEDOUT)
		dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

	return ret;
}

static void moxart_send_command(struct moxart_host *host,
	struct mmc_command *cmd)
{
	u32 status, cmdctrl;

	writel(RSP_TIMEOUT | RSP_CRC_OK |
	       RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
	writel(cmd->arg, host->base + REG_ARGUMENT);

	cmdctrl = cmd->opcode & CMD_IDX_MASK;
	if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
	    cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
	    cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
		cmdctrl |= CMD_APP_CMD;

	if (cmd->flags & MMC_RSP_PRESENT)
		cmdctrl |= CMD_NEED_RSP;

	if (cmd->flags & MMC_RSP_136)
		cmdctrl |= CMD_LONG_RSP;

	writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);

	if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
		cmd->error = -ETIMEDOUT;

	if (status & RSP_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
		return;
	}
	if (status & RSP_CRC_FAIL) {
		cmd->error = -EIO;
		return;
	}
	if (status & RSP_CRC_OK) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = readl(host->base + REG_RESPONSE0);
			cmd->resp[2] = readl(host->base + REG_RESPONSE1);
			cmd->resp[1] = readl(host->base + REG_RESPONSE2);
			cmd->resp[0] = readl(host->base + REG_RESPONSE3);
		} else {
			cmd->resp[0] = readl(host->base + REG_RESPONSE0);
		}
	}
}

static void moxart_dma_complete(void *param)
{
	struct moxart_host *host = param;

	complete(&host->dma_complete);
}

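/*
 * DMA data phase: map the request's scatterlist, queue it on the slave
 * channel that targets REG_DATA_WINDOW, and wait (interruptibly, bounded by
 * host->timeout) for the completion raised by moxart_dma_complete().
 */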
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
	u32 len, dir_slave;
	long dma_time;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	if (host->data_len == data->bytes_xfered)
		return;

	if (data->flags & MMC_DATA_WRITE) {
		dma_chan = host->dma_chan_tx;
		dir_slave = DMA_MEM_TO_DEV;
	} else {
		dma_chan = host->dma_chan_rx;
		dir_slave = DMA_DEV_TO_MEM;
	}

	len = dma_map_sg(dma_chan->device->dev, data->sg,
			 data->sg_len, mmc_get_dma_dir(data));

	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	} else {
		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
	}

	if (desc) {
		host->tx_desc = desc;
		desc->callback = moxart_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		dma_async_issue_pending(dma_chan);
	}

	data->bytes_xfered += host->data_remain;

	dma_time = wait_for_completion_interruptible_timeout(
		   &host->dma_complete, host->timeout);

	dma_unmap_sg(dma_chan->device->dev,
		     data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

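/*
 * PIO data phase: move FIFO contents 32 bits at a time, at most fifo_width
 * bytes per FIFO_URUN/FIFO_ORUN event.  SCR data is sent by the card in
 * big-endian order and is byte-swapped on read.
 */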
static void moxart_transfer_pio(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 *sgp, len = 0, remain, status;

	if (host->data_len == data->bytes_xfered)
		return;

	sgp = sg_virt(host->cur_sg);
	remain = host->data_remain;

	if (data->flags & MMC_DATA_WRITE) {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_URUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				iowrite32(*sgp, host->base + REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	} else {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_ORUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				/* SCR data must be read in big endian. */
				if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
					*sgp = ioread32be(host->base +
							  REG_DATA_WINDOW);
				else
					*sgp = ioread32(host->base +
							REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	}

	data->bytes_xfered += host->data_remain - remain;
	host->data_remain = remain;

	if (host->data_len != data->bytes_xfered)
		moxart_next_sg(host);
	else
		complete(&host->pio_complete);
}

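/*
 * Program the data engine for the current request: the block size is encoded
 * as log2(blksz) in the low bits of REG_DATA_CONTROL, and DMA is only enabled
 * when the transfer is larger than the FIFO and a channel was obtained at
 * probe time.
 */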
static void moxart_prepare_data(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 datactrl;
	int blksz_bits;

	if (!data)
		return;

	host->data_len = data->blocks * data->blksz;
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	moxart_init_sg(host, data);

	datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

	if (data->flags & MMC_DATA_WRITE)
		datactrl |= DCR_DATA_WRITE;

	if ((host->data_len > host->fifo_width) && host->have_dma)
		datactrl |= DCR_DMA_EN;

	writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
	writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
	writel(host->rate, host->base + REG_DATA_TIMER);
	writel(host->data_len, host->base + REG_DATA_LENGTH);
	writel(datactrl, host->base + REG_DATA_CONTROL);
}

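/*
 * Request handling: send the command, then run the data phase via DMA for
 * transfers larger than the FIFO, or via PIO otherwise (PIO is driven from
 * the FIFO interrupt, so the lock is dropped while waiting), and finally
 * check MASK_DATA for the outcome.
 */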
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;
	long pio_time;

	spin_lock_irqsave(&host->lock, flags);

	init_completion(&host->dma_complete);
	init_completion(&host->pio_complete);

	host->mrq = mrq;

	if (readl(host->base + REG_STATUS) & CARD_DETECT) {
		mrq->cmd->error = -ETIMEDOUT;
		goto request_done;
	}

	moxart_prepare_data(host);
	moxart_send_command(host, host->mrq->cmd);

	if (mrq->cmd->data) {
		if ((host->data_len > host->fifo_width) && host->have_dma) {

			writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			moxart_transfer_dma(mrq->cmd->data, host);

			spin_lock_irqsave(&host->lock, flags);
		} else {

			writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			/* PIO transfers start from interrupt. */
			pio_time = wait_for_completion_interruptible_timeout(
				   &host->pio_complete, host->timeout);

			spin_lock_irqsave(&host->lock, flags);
		}

		if (host->is_removed) {
			dev_err(mmc_dev(host->mmc), "card removed\n");
			mrq->cmd->error = -ETIMEDOUT;
		}

		if (moxart_wait_for_status(host, MASK_DATA, &status)
		    == -ETIMEDOUT) {
			mrq->cmd->data->error = -ETIMEDOUT;
			goto request_done;
		}

		if (status & DATA_CRC_FAIL)
			mrq->cmd->data->error = -ETIMEDOUT;

		if (mrq->cmd->data->stop)
			moxart_send_command(host, mrq->cmd->data->stop);
	}

request_done:
	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}

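/*
 * Interrupt handler: CARD_CHANGE reports insertion/removal (any in-flight
 * DMA is aborted on removal), while FIFO_ORUN/FIFO_URUN advance the PIO
 * transfer for the current request.
 */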
static irqreturn_t moxart_irq(int irq, void *devid)
{
	struct moxart_host *host = (struct moxart_host *)devid;
	u32 status;

	spin_lock(&host->lock);

	status = readl(host->base + REG_STATUS);
	if (status & CARD_CHANGE) {
		host->is_removed = status & CARD_DETECT;
		if (host->is_removed && host->have_dma) {
			dmaengine_terminate_all(host->dma_chan_tx);
			dmaengine_terminate_all(host->dma_chan_rx);
		}
		host->mrq = NULL;
		writel(MASK_INTR_PIO, host->base + REG_CLEAR);
		writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
		mmc_detect_change(host->mmc, 0);
	}
	if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
		moxart_transfer_pio(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}

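/*
 * The SD clock is sysclk / (2 * (div + 1)); pick the smallest divider whose
 * resulting rate does not exceed the requested clock.  Bus power is encoded
 * as (vdd - MIN_POWER) in the low bits of REG_POWER_CONTROL.
 */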
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 power, div;
	u32 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (ios->clock) {
		for (div = 0; div < CLK_DIV_MASK; ++div) {
			if (ios->clock >= host->sysclk / (2 * (div + 1)))
				break;
		}
		ctrl = CLK_SD | div;
		host->rate = host->sysclk / (2 * (div + 1));
		if (host->rate > host->sysclk)
			ctrl |= CLK_HISPD;
		writel(ctrl, host->base + REG_CLOCK_CONTROL);
	}

	if (ios->power_mode == MMC_POWER_OFF) {
		writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
		       host->base + REG_POWER_CONTROL);
	} else {
		if (ios->vdd < MIN_POWER)
			power = 0;
		else
			power = ios->vdd - MIN_POWER;

		writel(SD_POWER_ON | (u32) power,
		       host->base + REG_POWER_CONTROL);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
		break;
	case MMC_BUS_WIDTH_8:
		writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
		break;
	default:
		writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
		break;
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

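/* The card's write-protect switch state is reported directly in REG_STATUS. */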
static int moxart_get_ro(struct mmc_host *mmc)
{
	struct moxart_host *host = mmc_priv(mmc);

	return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}

static const struct mmc_host_ops moxart_ops = {
	.request = moxart_request,
	.set_ios = moxart_set_ios,
	.get_ro = moxart_get_ro,
};

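/*
 * Probe: map the register window and clock from the device tree, request the
 * optional "tx"/"rx" DMA channels (falling back to PIO if they are absent),
 * reset the controller and register the MMC host.
 */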
static int moxart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource res_mmc;
	struct mmc_host *mmc;
	struct moxart_host *host = NULL;
	struct dma_slave_config cfg;
	struct clk *clk;
	void __iomem *reg_mmc;
	int irq, ret;
	u32 i;

	mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
	if (!mmc) {
		dev_err(dev, "mmc_alloc_host failed\n");
		ret = -ENOMEM;
		goto out;
	}

	ret = of_address_to_resource(node, 0, &res_mmc);
	if (ret) {
		dev_err(dev, "of_address_to_resource failed\n");
		goto out;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		dev_err(dev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto out;
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out;
	}

	reg_mmc = devm_ioremap_resource(dev, &res_mmc);
	if (IS_ERR(reg_mmc)) {
		ret = PTR_ERR(reg_mmc);
		goto out;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->base = reg_mmc;
	host->reg_phys = res_mmc.start;
	host->timeout = msecs_to_jiffies(1000);
	host->sysclk = clk_get_rate(clk);
	/* REG_FEATURE reports the FIFO depth in 32-bit words. */
	host->fifo_width = readl(host->base + REG_FEATURE) << 2;
	host->dma_chan_tx = dma_request_chan(dev, "tx");
	host->dma_chan_rx = dma_request_chan(dev, "rx");

	spin_lock_init(&host->lock);

	mmc->ops = &moxart_ops;
	mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
	mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
	mmc->ocr_avail = 0xffff00;	/* Support 2.0v - 3.6v power. */

	if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
		if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
		    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out;
		}
		dev_dbg(dev, "PIO mode transfer enabled\n");
		host->have_dma = false;
	} else {
		dev_dbg(dev, "DMA channels found (%p,%p)\n",
			host->dma_chan_tx, host->dma_chan_rx);
		host->have_dma = true;

		memset(&cfg, 0, sizeof(cfg));
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.src_addr = 0;
		cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
		dmaengine_slave_config(host->dma_chan_tx, &cfg);

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
		cfg.dst_addr = 0;
		dmaengine_slave_config(host->dma_chan_rx, &cfg);
	}

	switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
	case 1:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 2:
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		break;
	default:
		break;
	}

	writel(0, host->base + REG_INTERRUPT_MASK);

	writel(CMD_SDC_RESET, host->base + REG_COMMAND);
	for (i = 0; i < MAX_RETRIES; i++) {
		if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
			break;
		udelay(5);
	}

	ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
	if (ret)
		goto out;

	dev_set_drvdata(dev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);

	return 0;

out:
	if (mmc)
		mmc_free_host(mmc);
	return ret;
}

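/*
 * Tear down in roughly the reverse order of probe: release the DMA channels,
 * unregister the host, mask interrupts, drop bus power and gate the SD clock
 * before the host structure is freed.
 */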
static int moxart_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
	struct moxart_host *host = mmc_priv(mmc);

	dev_set_drvdata(&pdev->dev, NULL);

	if (!IS_ERR(host->dma_chan_tx))
		dma_release_channel(host->dma_chan_tx);
	if (!IS_ERR(host->dma_chan_rx))
		dma_release_channel(host->dma_chan_rx);
	mmc_remove_host(mmc);

	writel(0, host->base + REG_INTERRUPT_MASK);
	writel(0, host->base + REG_POWER_CONTROL);
	writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
	       host->base + REG_CLOCK_CONTROL);

	mmc_free_host(mmc);

	return 0;
}

static const struct of_device_id moxart_mmc_match[] = {
	{ .compatible = "moxa,moxart-mmc" },
	{ .compatible = "faraday,ftsdc010" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);

static struct platform_driver moxart_mmc_driver = {
	.probe      = moxart_probe,
	.remove     = moxart_remove,
	.driver     = {
		.name		= "mmc-moxart",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table	= moxart_mmc_match,
	},
};
module_platform_driver(moxart_mmc_driver);

MODULE_ALIAS("platform:mmc-moxart");
MODULE_DESCRIPTION("MOXA ART MMC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");