/*
 * MOXA ART MMC host driver.
 *
 * Copyright (C) 2014 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 * Moxa Technologies Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>
#define REG_COMMAND             0
#define REG_ARGUMENT            4
#define REG_RESPONSE0           8
#define REG_RESPONSE1           12
#define REG_RESPONSE2           16
#define REG_RESPONSE3           20
#define REG_RESPONSE_COMMAND    24
#define REG_DATA_CONTROL        28
#define REG_DATA_TIMER          32
#define REG_DATA_LENGTH         36
#define REG_STATUS              40
#define REG_CLEAR               44
#define REG_INTERRUPT_MASK      48
#define REG_POWER_CONTROL       52
#define REG_CLOCK_CONTROL       56
#define REG_BUS_WIDTH           60
#define REG_DATA_WINDOW         64
#define REG_FEATURE             68
#define REG_REVISION            72
/* REG_COMMAND */
#define CMD_SDC_RESET           BIT(10)
#define CMD_EN                  BIT(9)
#define CMD_APP_CMD             BIT(8)
#define CMD_LONG_RSP            BIT(7)
#define CMD_NEED_RSP            BIT(6)
#define CMD_IDX_MASK            0x3f
/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP             BIT(6)
#define RSP_CMD_IDX_MASK        0x3f
/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET     BIT(8)
#define DCR_DATA_THRES          BIT(7)
#define DCR_DATA_EN             BIT(6)
#define DCR_DMA_EN              BIT(5)
#define DCR_DATA_WRITE          BIT(4)
#define DCR_BLK_SIZE            0x0f
/* REG_DATA_LENGTH */
#define DATA_LEN_MASK           0xffffff
/* REG_STATUS */
#define WRITE_PROT              BIT(12)
#define CARD_DETECT             BIT(11)
/* Bits 1-10 below can be sent to either register, interrupt or clear. */
#define CARD_CHANGE             BIT(10)
#define FIFO_ORUN               BIT(9)
#define FIFO_URUN               BIT(8)
#define DATA_END                BIT(7)
#define CMD_SENT                BIT(6)
#define DATA_CRC_OK             BIT(5)
#define RSP_CRC_OK              BIT(4)
#define DATA_TIMEOUT            BIT(3)
#define RSP_TIMEOUT             BIT(2)
#define DATA_CRC_FAIL           BIT(1)
#define RSP_CRC_FAIL            BIT(0)

#define MASK_RSP                (RSP_TIMEOUT | RSP_CRC_FAIL | \
                                 RSP_CRC_OK  | CARD_DETECT  | CMD_SENT)

#define MASK_DATA               (DATA_CRC_OK   | DATA_END | \
                                 DATA_CRC_FAIL | DATA_TIMEOUT)

#define MASK_INTR_PIO           (FIFO_URUN | FIFO_ORUN | CARD_CHANGE)
/* REG_POWER_CONTROL */
#define SD_POWER_ON             BIT(4)
#define SD_POWER_MASK           0x0f
/* REG_CLOCK_CONTROL */
#define CLK_HISPD               BIT(9)
#define CLK_OFF                 BIT(8)
#define CLK_SD                  BIT(7)
#define CLK_DIV_MASK            0x7f
/* REG_BUS_WIDTH */
#define BUS_WIDTH_8             BIT(2)
#define BUS_WIDTH_4             BIT(1)
#define BUS_WIDTH_1             BIT(0)

#define MMC_VDD_360             23
#define MIN_POWER               (MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES             500000
struct moxart_host {
        spinlock_t                      lock;

        void __iomem                    *base;

        phys_addr_t                     reg_phys;

        struct dma_chan                 *dma_chan_tx;
        struct dma_chan                 *dma_chan_rx;
        struct dma_async_tx_descriptor  *tx_desc;
        struct mmc_host                 *mmc;
        struct mmc_request              *mrq;
        struct scatterlist              *cur_sg;
        struct completion               dma_complete;
        struct completion               pio_complete;

        u32                             num_sg;
        u32                             data_remain;
        u32                             data_len;
        u32                             fifo_width;
        u32                             timeout;
        u32                             rate;

        long                            sysclk;

        bool                            have_dma;
        bool                            is_removed;
};
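/*
 * Load the first scatterlist entry for a new data transfer and clamp the
 * per-segment byte count to the total request length.
 */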
static inline void moxart_init_sg(struct moxart_host *host,
                                  struct mmc_data *data)
{
        host->cur_sg = data->sg;
        host->num_sg = data->sg_len;
        host->data_remain = host->cur_sg->length;

        if (host->data_remain > host->data_len)
                host->data_remain = host->data_len;
}
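/*
 * Advance to the next scatterlist entry; returns the number of segments
 * still to be transferred.
 */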
static inline int moxart_next_sg(struct moxart_host *host)
{
        int remain;
        struct mmc_data *data = host->mrq->cmd->data;

        host->cur_sg++;
        host->num_sg--;

        if (host->num_sg > 0) {
                host->data_remain = host->cur_sg->length;
                remain = host->data_len - data->bytes_xfered;
                if (remain > 0 && remain < host->data_remain)
                        host->data_remain = remain;
        }

        return host->num_sg;
}
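/*
 * Poll REG_STATUS until one of the bits in @mask is set, then acknowledge
 * it through REG_CLEAR.  Returns 0 on success or -ETIMEDOUT after
 * MAX_RETRIES polls.
 */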
static int moxart_wait_for_status(struct moxart_host *host,
                                  u32 mask, u32 *status)
{
        int ret = -ETIMEDOUT;
        u32 i;

        for (i = 0; i < MAX_RETRIES; i++) {
                *status = readl(host->base + REG_STATUS);
                if (!(*status & mask)) {
                        udelay(5);
                        continue;
                }
                writel(*status & mask, host->base + REG_CLEAR);
                ret = 0;
                break;
        }

        if (ret == -ETIMEDOUT)
                dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

        return ret;
}
static void moxart_send_command(struct moxart_host *host,
        struct mmc_command *cmd)
{
        u32 status, cmdctrl;

        writel(RSP_TIMEOUT  | RSP_CRC_OK |
               RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
        writel(cmd->arg, host->base + REG_ARGUMENT);

        cmdctrl = cmd->opcode & CMD_IDX_MASK;
        if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
            cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
            cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
                cmdctrl |= CMD_APP_CMD;

        if (cmd->flags & MMC_RSP_PRESENT)
                cmdctrl |= CMD_NEED_RSP;

        if (cmd->flags & MMC_RSP_136)
                cmdctrl |= CMD_LONG_RSP;

        writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);

        if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
                cmd->error = -ETIMEDOUT;

        if (status & RSP_TIMEOUT) {
                cmd->error = -ETIMEDOUT;
                return;
        }
        if (status & RSP_CRC_FAIL) {
                cmd->error = -EIO;
                return;
        }
        if (status & RSP_CRC_OK) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[3] = readl(host->base + REG_RESPONSE0);
                        cmd->resp[2] = readl(host->base + REG_RESPONSE1);
                        cmd->resp[1] = readl(host->base + REG_RESPONSE2);
                        cmd->resp[0] = readl(host->base + REG_RESPONSE3);
                } else {
                        cmd->resp[0] = readl(host->base + REG_RESPONSE0);
                }
        }
}
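/* dmaengine completion callback: wake up the waiter in moxart_transfer_dma(). */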
static void moxart_dma_complete(void *param)
{
        struct moxart_host *host = param;

        complete(&host->dma_complete);
}
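/*
 * Run one request's data phase through the dmaengine slave channel that
 * matches the transfer direction, then wait for the completion callback
 * (bounded by host->timeout).
 */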
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
        u32 len, dir_data, dir_slave;
        unsigned long dma_time;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *dma_chan;

        if (host->data_len == data->bytes_xfered)
                return;

        if (data->flags & MMC_DATA_WRITE) {
                dma_chan = host->dma_chan_tx;
                dir_data = DMA_TO_DEVICE;
                dir_slave = DMA_MEM_TO_DEV;
        } else {
                dma_chan = host->dma_chan_rx;
                dir_data = DMA_FROM_DEVICE;
                dir_slave = DMA_DEV_TO_MEM;
        }

        len = dma_map_sg(dma_chan->device->dev, data->sg,
                         data->sg_len, dir_data);

        if (len > 0) {
                desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
                                               len, dir_slave,
                                               DMA_PREP_INTERRUPT |
                                               DMA_CTRL_ACK);
        } else {
                dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
        }

        if (desc) {
                host->tx_desc = desc;
                desc->callback = moxart_dma_complete;
                desc->callback_param = host;
                dmaengine_submit(desc);
                dma_async_issue_pending(dma_chan);
        }

        data->bytes_xfered += host->data_remain;

        dma_time = wait_for_completion_interruptible_timeout(
                        &host->dma_complete, host->timeout);

        dma_unmap_sg(dma_chan->device->dev,
                     data->sg, data->sg_len,
                     dir_data);
}
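/*
 * PIO data phase: feed or drain the FIFO through REG_DATA_WINDOW in
 * fifo_width-sized bursts, waiting for FIFO_URUN/FIFO_ORUN between bursts.
 * Called from the interrupt handler; completes pio_complete when the last
 * byte has been transferred or on error.
 */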
static void moxart_transfer_pio(struct moxart_host *host)
{
        struct mmc_data *data = host->mrq->cmd->data;
        u32 *sgp, len = 0, remain, status;

        if (host->data_len == data->bytes_xfered)
                return;

        sgp = sg_virt(host->cur_sg);
        remain = host->data_remain;

        if (data->flags & MMC_DATA_WRITE) {
                while (remain > 0) {
                        if (moxart_wait_for_status(host, FIFO_URUN, &status)
                             == -ETIMEDOUT) {
                                data->error = -ETIMEDOUT;
                                complete(&host->pio_complete);
                                return;
                        }
                        for (len = 0; len < remain && len < host->fifo_width;) {
                                iowrite32(*sgp, host->base + REG_DATA_WINDOW);
                                sgp++;
                                len += 4;
                        }
                        remain -= len;
                }

        } else {
                while (remain > 0) {
                        if (moxart_wait_for_status(host, FIFO_ORUN, &status)
                             == -ETIMEDOUT) {
                                data->error = -ETIMEDOUT;
                                complete(&host->pio_complete);
                                return;
                        }
                        for (len = 0; len < remain && len < host->fifo_width;) {
                                /* SCR data must be read in big endian. */
                                if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
                                        *sgp = ioread32be(host->base +
                                                          REG_DATA_WINDOW);
                                else
                                        *sgp = ioread32(host->base +
                                                        REG_DATA_WINDOW);
                                sgp++;
                                len += 4;
                        }
                        remain -= len;
                }
        }

        data->bytes_xfered += host->data_remain - remain;
        host->data_remain = remain;

        if (host->data_len != data->bytes_xfered)
                moxart_next_sg(host);
        else
                complete(&host->pio_complete);
}
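/*
 * Program block size, transfer length and direction into REG_DATA_CONTROL
 * before the command is sent; DMA is only enabled for transfers larger
 * than the FIFO.
 */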
static void moxart_prepare_data(struct moxart_host *host)
{
        struct mmc_data *data = host->mrq->cmd->data;
        u32 datactrl;
        int blksz_bits;

        if (!data)
                return;

        host->data_len = data->blocks * data->blksz;
        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        moxart_init_sg(host, data);

        datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

        if (data->flags & MMC_DATA_WRITE)
                datactrl |= DCR_DATA_WRITE;

        if ((host->data_len > host->fifo_width) && host->have_dma)
                datactrl |= DCR_DMA_EN;

        writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
        writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
        writel(host->rate, host->base + REG_DATA_TIMER);
        writel(host->data_len, host->base + REG_DATA_LENGTH);
        writel(datactrl, host->base + REG_DATA_CONTROL);
}
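/*
 * mmc_host_ops .request handler: send the command, run the data phase via
 * DMA or PIO, optionally send the stop command, and report the result with
 * mmc_request_done().
 */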
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct moxart_host *host = mmc_priv(mmc);
        unsigned long pio_time, flags;
        u32 status;

        spin_lock_irqsave(&host->lock, flags);

        init_completion(&host->dma_complete);
        init_completion(&host->pio_complete);

        host->mrq = mrq;

        if (readl(host->base + REG_STATUS) & CARD_DETECT) {
                mrq->cmd->error = -ETIMEDOUT;
                goto request_done;
        }

        moxart_prepare_data(host);
        moxart_send_command(host, host->mrq->cmd);

        if (mrq->cmd->data) {
                if ((host->data_len > host->fifo_width) && host->have_dma) {

                        writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);

                        spin_unlock_irqrestore(&host->lock, flags);

                        moxart_transfer_dma(mrq->cmd->data, host);

                        spin_lock_irqsave(&host->lock, flags);
                } else {

                        writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);

                        spin_unlock_irqrestore(&host->lock, flags);

                        /* PIO transfers start from interrupt. */
                        pio_time = wait_for_completion_interruptible_timeout(
                                        &host->pio_complete, host->timeout);

                        spin_lock_irqsave(&host->lock, flags);
                }

                if (host->is_removed) {
                        dev_err(mmc_dev(host->mmc), "card removed\n");
                        mrq->cmd->error = -ETIMEDOUT;
                        goto request_done;
                }

                if (moxart_wait_for_status(host, MASK_DATA, &status)
                    == -ETIMEDOUT) {
                        mrq->cmd->data->error = -ETIMEDOUT;
                        goto request_done;
                }

                if (status & DATA_CRC_FAIL)
                        mrq->cmd->data->error = -ETIMEDOUT;

                if (mrq->cmd->data->stop)
                        moxart_send_command(host, mrq->cmd->data->stop);
        }

request_done:
        spin_unlock_irqrestore(&host->lock, flags);
        mmc_request_done(host->mmc, mrq);
}
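/*
 * Interrupt handler: card-change events trigger redetection (and cancel any
 * in-flight DMA), FIFO underrun/overrun events drive the PIO state machine.
 */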
static irqreturn_t moxart_irq(int irq, void *devid)
{
        struct moxart_host *host = (struct moxart_host *)devid;
        u32 status;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        status = readl(host->base + REG_STATUS);
        if (status & CARD_CHANGE) {
                host->is_removed = status & CARD_DETECT;
                if (host->is_removed && host->have_dma) {
                        dmaengine_terminate_all(host->dma_chan_tx);
                        dmaengine_terminate_all(host->dma_chan_rx);
                }
                host->mrq = NULL;
                writel(MASK_INTR_PIO, host->base + REG_CLEAR);
                writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
                mmc_detect_change(host->mmc, 0);
        }
        if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
                moxart_transfer_pio(host);

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_HANDLED;
}
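/*
 * Apply clock, power and bus-width settings.  The controller clock is
 * sysclk / (2 * (div + 1)); e.g. with a (hypothetical) 100 MHz source
 * clock, div = 0 yields 50 MHz and div = 4 yields 10 MHz.
 */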
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct moxart_host *host = mmc_priv(mmc);
        unsigned long flags;
        u8 power, div;
        u32 ctrl;

        spin_lock_irqsave(&host->lock, flags);

        if (ios->clock) {
                for (div = 0; div < CLK_DIV_MASK; ++div) {
                        if (ios->clock >= host->sysclk / (2 * (div + 1)))
                                break;
                }
                ctrl = CLK_SD | div;
                host->rate = host->sysclk / (2 * (div + 1));
                if (host->rate > host->sysclk)
                        ctrl |= CLK_HISPD;
                writel(ctrl, host->base + REG_CLOCK_CONTROL);
        }

        if (ios->power_mode == MMC_POWER_OFF) {
                writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
                       host->base + REG_POWER_CONTROL);
        } else {
                if (ios->vdd < MIN_POWER)
                        power = 0;
                else
                        power = ios->vdd - MIN_POWER;

                writel(SD_POWER_ON | (u32) power,
                       host->base + REG_POWER_CONTROL);
        }

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
                break;
        case MMC_BUS_WIDTH_8:
                writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
                break;
        default:
                writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
                break;
        }

        spin_unlock_irqrestore(&host->lock, flags);
}
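/* Report the write-protect switch state from REG_STATUS. */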
static int moxart_get_ro(struct mmc_host *mmc)
{
        struct moxart_host *host = mmc_priv(mmc);

        return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}
static const struct mmc_host_ops moxart_ops = {
        .request = moxart_request,
        .set_ios = moxart_set_ios,
        .get_ro = moxart_get_ro,
};
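/*
 * Probe: map the register window, pick up the bus clock and optional DMA
 * channels from the device tree, reset the SD controller and register the
 * mmc host.  Falls back to PIO when no DMA channels are provided.
 */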
static int moxart_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource res_mmc;
        struct mmc_host *mmc;
        struct moxart_host *host = NULL;
        struct dma_slave_config cfg;
        struct clk *clk;
        void __iomem *reg_mmc;
        int irq, ret;
        u32 i;

        mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
        if (!mmc) {
                dev_err(dev, "mmc_alloc_host failed\n");
                ret = -ENOMEM;
                goto out;
        }

        ret = of_address_to_resource(node, 0, &res_mmc);
        if (ret) {
                dev_err(dev, "of_address_to_resource failed\n");
                goto out;
        }

        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                dev_err(dev, "irq_of_parse_and_map failed\n");
                ret = -EINVAL;
                goto out;
        }

        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                goto out;
        }

        reg_mmc = devm_ioremap_resource(dev, &res_mmc);
        if (IS_ERR(reg_mmc)) {
                ret = PTR_ERR(reg_mmc);
                goto out;
        }

        ret = mmc_of_parse(mmc);
        if (ret)
                goto out;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->base = reg_mmc;
        host->reg_phys = res_mmc.start;
        host->timeout = msecs_to_jiffies(1000);
        host->sysclk = clk_get_rate(clk);
        host->fifo_width = readl(host->base + REG_FEATURE) << 2;
        host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
        host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");

        spin_lock_init(&host->lock);

        mmc->ops = &moxart_ops;
        mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
        mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
        mmc->ocr_avail = 0xffff00;      /* Support 2.0v - 3.6v power. */

        if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
                if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
                    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        goto out;
                }
                dev_dbg(dev, "PIO mode transfer enabled\n");
                host->have_dma = false;
        } else {
                dev_dbg(dev, "DMA channels found (%p,%p)\n",
                        host->dma_chan_tx, host->dma_chan_rx);
                host->have_dma = true;

                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.src_addr = 0;
                cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
                dmaengine_slave_config(host->dma_chan_tx, &cfg);

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
                cfg.dst_addr = 0;
                dmaengine_slave_config(host->dma_chan_rx, &cfg);
        }

        switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
        case 1:
                mmc->caps |= MMC_CAP_4_BIT_DATA;
                break;
        case 2:
                mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
                break;
        default:
                break;
        }

        writel(0, host->base + REG_INTERRUPT_MASK);

        writel(CMD_SDC_RESET, host->base + REG_COMMAND);
        for (i = 0; i < MAX_RETRIES; i++) {
                if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
                        break;
                udelay(5);
        }

        ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
        if (ret)
                goto out;

        dev_set_drvdata(dev, mmc);
        mmc_add_host(mmc);

        dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);

        return 0;

out:
        if (mmc)
                mmc_free_host(mmc);
        return ret;
}
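/*
 * Remove: release the DMA channels, unregister and free the mmc host, then
 * mask interrupts, drop card power and gate the controller clock off.
 */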
static int moxart_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
        struct moxart_host *host = mmc_priv(mmc);

        dev_set_drvdata(&pdev->dev, NULL);

        if (mmc) {
                if (!IS_ERR(host->dma_chan_tx))
                        dma_release_channel(host->dma_chan_tx);
                if (!IS_ERR(host->dma_chan_rx))
                        dma_release_channel(host->dma_chan_rx);
                mmc_remove_host(mmc);
                mmc_free_host(mmc);

                writel(0, host->base + REG_INTERRUPT_MASK);
                writel(0, host->base + REG_POWER_CONTROL);
                writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
                       host->base + REG_CLOCK_CONTROL);
        }
        return 0;
}
static const struct of_device_id moxart_mmc_match[] = {
        { .compatible = "moxa,moxart-mmc" },
        { .compatible = "faraday,ftsdc010" },
        { }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);
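/*
 * Illustrative device tree node (a sketch only; the unit address, interrupt
 * specifier and clock/DMA phandles are assumptions, not taken from a real
 * board file, while the compatible strings and the "tx"/"rx" dma-names come
 * from this driver):
 *
 *      mmc: mmc@98e00000 {
 *              compatible = "moxa,moxart-mmc";
 *              reg = <0x98e00000 0x5c>;
 *              interrupts = <5 0>;
 *              clocks = <&clk_apb>;
 *              dmas = <&dma 5>, <&dma 5>;
 *              dma-names = "tx", "rx";
 *      };
 */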
static struct platform_driver moxart_mmc_driver = {
        .probe      = moxart_probe,
        .remove     = moxart_remove,
        .driver     = {
                .name           = "mmc-moxart",
                .of_match_table = moxart_mmc_match,
        },
};
);
726 MODULE_ALIAS("platform:mmc-moxart");
727 MODULE_DESCRIPTION("MOXA ART MMC driver");
728 MODULE_LICENSE("GPL v2");
729 MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");