/*
 * MOXA ART MMC host driver.
 *
 * Copyright (C) 2014 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 * Moxa Technologies Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>

#define REG_COMMAND		0
#define REG_ARGUMENT		4
#define REG_RESPONSE0		8
#define REG_RESPONSE1		12
#define REG_RESPONSE2		16
#define REG_RESPONSE3		20
#define REG_RESPONSE_COMMAND	24
#define REG_DATA_CONTROL	28
#define REG_DATA_TIMER		32
#define REG_DATA_LENGTH		36
#define REG_STATUS		40
#define REG_CLEAR		44
#define REG_INTERRUPT_MASK	48
#define REG_POWER_CONTROL	52
#define REG_CLOCK_CONTROL	56
#define REG_BUS_WIDTH		60
#define REG_DATA_WINDOW		64
#define REG_FEATURE		68
#define REG_REVISION		72

/* REG_COMMAND */
#define CMD_SDC_RESET		BIT(10)
#define CMD_EN			BIT(9)
#define CMD_APP_CMD		BIT(8)
#define CMD_LONG_RSP		BIT(7)
#define CMD_NEED_RSP		BIT(6)
#define CMD_IDX_MASK		0x3f

/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP		BIT(6)
#define RSP_CMD_IDX_MASK	0x3f

/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET	BIT(8)
#define DCR_DATA_THRES		BIT(7)
#define DCR_DATA_EN		BIT(6)
#define DCR_DMA_EN		BIT(5)
#define DCR_DATA_WRITE		BIT(4)
#define DCR_BLK_SIZE		0x0f

/* REG_DATA_LENGTH */
#define DATA_LEN_MASK		0xffffff

/* REG_STATUS */
#define WRITE_PROT		BIT(12)
#define CARD_DETECT		BIT(11)
/* 1-10 below can be written to either register: interrupt mask or clear. */
#define CARD_CHANGE		BIT(10)
#define FIFO_ORUN		BIT(9)
#define FIFO_URUN		BIT(8)
#define DATA_END		BIT(7)
#define CMD_SENT		BIT(6)
#define DATA_CRC_OK		BIT(5)
#define RSP_CRC_OK		BIT(4)
#define DATA_TIMEOUT		BIT(3)
#define RSP_TIMEOUT		BIT(2)
#define DATA_CRC_FAIL		BIT(1)
#define RSP_CRC_FAIL		BIT(0)

#define MASK_RSP		(RSP_TIMEOUT | RSP_CRC_FAIL | \
				 RSP_CRC_OK  | CARD_DETECT  | CMD_SENT)

#define MASK_DATA		(DATA_CRC_OK   | DATA_END | \
				 DATA_CRC_FAIL | DATA_TIMEOUT)

#define MASK_INTR_PIO		(FIFO_URUN | FIFO_ORUN | CARD_CHANGE)

/* REG_POWER_CONTROL */
#define SD_POWER_ON		BIT(4)
#define SD_POWER_MASK		0x0f

/* REG_CLOCK_CONTROL */
#define CLK_HISPD		BIT(9)
#define CLK_OFF			BIT(8)
#define CLK_SD			BIT(7)
#define CLK_DIV_MASK		0x7f

/* REG_BUS_WIDTH */
#define BUS_WIDTH_4_SUPPORT	BIT(3)
#define BUS_WIDTH_4		BIT(2)
#define BUS_WIDTH_1		BIT(0)

#define MMC_VDD_360		23
#define MIN_POWER		(MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES		500000

struct moxart_host {
	spinlock_t			lock;

	void __iomem			*base;

	phys_addr_t			reg_phys;

	struct dma_chan			*dma_chan_tx;
	struct dma_chan			*dma_chan_rx;
	struct dma_async_tx_descriptor	*tx_desc;
	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct scatterlist		*cur_sg;
	struct completion		dma_complete;
	struct completion		pio_complete;

	u32				num_sg;
	u32				data_remain;
	u32				data_len;
	u32				fifo_width;
	u32				timeout;
	u32				rate;

	long				sysclk;

	bool				have_dma;
	bool				is_removed;
};

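/*
 * Scatter-gather bookkeeping for the PIO path: cur_sg/num_sg walk the
 * request's sg list while data_remain tracks how many bytes are left in
 * the current segment (clamped to the total transfer length, data_len).
 */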
static inline void moxart_init_sg(struct moxart_host *host,
				  struct mmc_data *data)
{
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;
	host->data_remain = host->cur_sg->length;

	if (host->data_remain > host->data_len)
		host->data_remain = host->data_len;
}

static inline int moxart_next_sg(struct moxart_host *host)
{
	int remain;
	struct mmc_data *data = host->mrq->cmd->data;

	host->cur_sg++;
	host->num_sg--;

	if (host->num_sg > 0) {
		host->data_remain = host->cur_sg->length;
		remain = host->data_len - data->bytes_xfered;
		if (remain > 0 && remain < host->data_remain)
			host->data_remain = remain;
	}

	return host->num_sg;
}

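/*
 * Busy-wait until one of the bits in @mask shows up in REG_STATUS, then
 * acknowledge it by writing the same bits to REG_CLEAR (write-one-to-clear).
 * Gives up after MAX_RETRIES polls and reports -ETIMEDOUT.
 */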
static int moxart_wait_for_status(struct moxart_host *host,
				  u32 mask, u32 *status)
{
	int ret = -ETIMEDOUT;
	u32 i;

	for (i = 0; i < MAX_RETRIES; i++) {
		*status = readl(host->base + REG_STATUS);
		if (!(*status & mask)) {
			udelay(5);
			continue;
		}
		writel(*status & mask, host->base + REG_CLEAR);
		ret = 0;
		break;
	}

	if (ret == -ETIMEDOUT)
		dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

	return ret;
}

static void moxart_send_command(struct moxart_host *host,
				struct mmc_command *cmd)
{
	u32 status, cmdctrl;

	writel(RSP_TIMEOUT  | RSP_CRC_OK |
	       RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
	writel(cmd->arg, host->base + REG_ARGUMENT);

	cmdctrl = cmd->opcode & CMD_IDX_MASK;
	if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
	    cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
	    cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
		cmdctrl |= CMD_APP_CMD;

	if (cmd->flags & MMC_RSP_PRESENT)
		cmdctrl |= CMD_NEED_RSP;

	if (cmd->flags & MMC_RSP_136)
		cmdctrl |= CMD_LONG_RSP;

	writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);

	if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
		cmd->error = -ETIMEDOUT;

	if (status & RSP_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
		return;
	}
	if (status & RSP_CRC_FAIL) {
		cmd->error = -EIO;
		return;
	}
	if (status & RSP_CRC_OK) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = readl(host->base + REG_RESPONSE0);
			cmd->resp[2] = readl(host->base + REG_RESPONSE1);
			cmd->resp[1] = readl(host->base + REG_RESPONSE2);
			cmd->resp[0] = readl(host->base + REG_RESPONSE3);
		} else {
			cmd->resp[0] = readl(host->base + REG_RESPONSE0);
		}
	}
}

static void moxart_dma_complete(void *param)
{
	struct moxart_host *host = param;

	complete(&host->dma_complete);
}

static bool moxart_use_dma(struct moxart_host *host)
{
	return (host->data_len > host->fifo_width) && host->have_dma;
}

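/*
 * DMA path: map the request's sg list, prepare a slave transfer to or from
 * REG_DATA_WINDOW on the tx or rx channel, and block until the dmaengine
 * callback completes dma_complete (or host->timeout expires).
 */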
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
	u32 len, dir_slave;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	if (host->data_len == data->bytes_xfered)
		return;

	if (data->flags & MMC_DATA_WRITE) {
		dma_chan = host->dma_chan_tx;
		dir_slave = DMA_MEM_TO_DEV;
	} else {
		dma_chan = host->dma_chan_rx;
		dir_slave = DMA_DEV_TO_MEM;
	}

	len = dma_map_sg(dma_chan->device->dev, data->sg,
			 data->sg_len, mmc_get_dma_dir(data));

	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	} else {
		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
	}

	if (desc) {
		host->tx_desc = desc;
		desc->callback = moxart_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		dma_async_issue_pending(dma_chan);
	}

	wait_for_completion_interruptible_timeout(&host->dma_complete,
						  host->timeout);

	data->bytes_xfered = host->data_len;

	dma_unmap_sg(dma_chan->device->dev,
		     data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

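/*
 * PIO path, driven from the FIFO interrupts: each pass waits for FIFO_URUN
 * (write) or FIFO_ORUN (read), then moves at most one FIFO's worth
 * (host->fifo_width bytes) of 32-bit words through REG_DATA_WINDOW before
 * updating bytes_xfered and advancing the sg list.
 */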
static void moxart_transfer_pio(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 *sgp, len = 0, remain, status;

	if (host->data_len == data->bytes_xfered)
		return;

	sgp = sg_virt(host->cur_sg);
	remain = host->data_remain;

	if (data->flags & MMC_DATA_WRITE) {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_URUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				iowrite32(*sgp, host->base + REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	} else {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_ORUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				*sgp = ioread32(host->base + REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	}

	data->bytes_xfered += host->data_remain - remain;
	host->data_remain = remain;

	if (host->data_len != data->bytes_xfered)
		moxart_next_sg(host);
	else
		complete(&host->pio_complete);
}

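/*
 * Program the data path for the current request. The block size must be a
 * power of two because REG_DATA_CONTROL encodes it as log2(blksz) in
 * DCR_BLK_SIZE; for example, a 512-byte block is encoded as 9.
 */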
static void moxart_prepare_data(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 datactrl;
	int blksz_bits;

	if (!data)
		return;

	host->data_len = data->blocks * data->blksz;
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	moxart_init_sg(host, data);

	datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

	if (data->flags & MMC_DATA_WRITE)
		datactrl |= DCR_DATA_WRITE;

	if (moxart_use_dma(host))
		datactrl |= DCR_DMA_EN;

	writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
	writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
	writel(host->rate, host->base + REG_DATA_TIMER);
	writel(host->data_len, host->base + REG_DATA_LENGTH);
	writel(datactrl, host->base + REG_DATA_CONTROL);
}

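/*
 * Request flow: prepare the data registers, issue the command, then run the
 * transfer either via DMA (only the card-change interrupt stays unmasked) or
 * via PIO (FIFO interrupts, completed from moxart_irq), and finally wait for
 * the MASK_DATA status bits before reporting the request done.
 */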
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&host->lock, flags);

	init_completion(&host->dma_complete);
	init_completion(&host->pio_complete);

	host->mrq = mrq;

	if (readl(host->base + REG_STATUS) & CARD_DETECT) {
		mrq->cmd->error = -ETIMEDOUT;
		goto request_done;
	}

	moxart_prepare_data(host);
	moxart_send_command(host, host->mrq->cmd);

	if (mrq->cmd->data) {
		if (moxart_use_dma(host)) {

			writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			moxart_transfer_dma(mrq->cmd->data, host);

			spin_lock_irqsave(&host->lock, flags);
		} else {

			writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			/* PIO transfers start from interrupt. */
			wait_for_completion_interruptible_timeout(&host->pio_complete,
								  host->timeout);

			spin_lock_irqsave(&host->lock, flags);
		}

		if (host->is_removed) {
			dev_err(mmc_dev(host->mmc), "card removed\n");
			mrq->cmd->error = -ETIMEDOUT;
			goto request_done;
		}

		if (moxart_wait_for_status(host, MASK_DATA, &status)
		    == -ETIMEDOUT) {
			mrq->cmd->data->error = -ETIMEDOUT;
			goto request_done;
		}

		if (status & DATA_CRC_FAIL)
			mrq->cmd->data->error = -ETIMEDOUT;

		if (mrq->cmd->data->stop)
			moxart_send_command(host, mrq->cmd->data->stop);
	}

request_done:
	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}

static irqreturn_t moxart_irq(int irq, void *devid)
{
	struct moxart_host *host = (struct moxart_host *)devid;
	u32 status;

	spin_lock(&host->lock);

	status = readl(host->base + REG_STATUS);
	if (status & CARD_CHANGE) {
		host->is_removed = status & CARD_DETECT;
		if (host->is_removed && host->have_dma) {
			dmaengine_terminate_all(host->dma_chan_tx);
			dmaengine_terminate_all(host->dma_chan_rx);
		}
		host->mrq = NULL;
		writel(MASK_INTR_PIO, host->base + REG_CLEAR);
		writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
		mmc_detect_change(host->mmc, 0);
	}
	if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
		moxart_transfer_pio(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}

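/*
 * The card clock is derived from sysclk as sysclk / (2 * (div + 1)) with a
 * 7-bit divider (CLK_DIV_MASK). For example, assuming a 96 MHz sysclk and a
 * requested 25 MHz clock, div = 1 is chosen and the card runs at 24 MHz.
 */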
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 power, div;
	u32 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (ios->clock) {
		for (div = 0; div < CLK_DIV_MASK; ++div) {
			if (ios->clock >= host->sysclk / (2 * (div + 1)))
				break;
		}
		ctrl = CLK_SD | div;
		host->rate = host->sysclk / (2 * (div + 1));
		if (host->rate > host->sysclk)
			ctrl |= CLK_HISPD;
		writel(ctrl, host->base + REG_CLOCK_CONTROL);
	}

	if (ios->power_mode == MMC_POWER_OFF) {
		writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
		       host->base + REG_POWER_CONTROL);
	} else {
		if (ios->vdd < MIN_POWER)
			power = 0;
		else
			power = ios->vdd - MIN_POWER;

		writel(SD_POWER_ON | (u32) power,
		       host->base + REG_POWER_CONTROL);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
		break;
	default:
		writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
		break;
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int moxart_get_ro(struct mmc_host *mmc)
{
	struct moxart_host *host = mmc_priv(mmc);

	return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}

static const struct mmc_host_ops moxart_ops = {
	.request = moxart_request,
	.set_ios = moxart_set_ios,
	.get_ro = moxart_get_ro,
};

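/*
 * Probe: map the controller registers, read the bus clock, request the "tx"
 * and "rx" DMA channels (falling back to PIO if they are unavailable), reset
 * the SD controller and register the mmc host. -EPROBE_DEFER from the DMA
 * provider is propagated so probing can be retried later.
 */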
static int moxart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource res_mmc;
	struct mmc_host *mmc;
	struct moxart_host *host = NULL;
	struct dma_slave_config cfg;
	struct clk *clk;
	void __iomem *reg_mmc;
	int irq, ret;
	u32 i;

	mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
	if (!mmc) {
		dev_err(dev, "mmc_alloc_host failed\n");
		ret = -ENOMEM;
		goto out_mmc;
	}

	ret = of_address_to_resource(node, 0, &res_mmc);
	if (ret) {
		dev_err(dev, "of_address_to_resource failed\n");
		goto out_mmc;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		dev_err(dev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto out_mmc;
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_mmc;
	}

	reg_mmc = devm_ioremap_resource(dev, &res_mmc);
	if (IS_ERR(reg_mmc)) {
		ret = PTR_ERR(reg_mmc);
		goto out_mmc;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_mmc;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->base = reg_mmc;
	host->reg_phys = res_mmc.start;
	host->timeout = msecs_to_jiffies(1000);
	host->sysclk = clk_get_rate(clk);
	host->fifo_width = readl(host->base + REG_FEATURE) << 2;
	host->dma_chan_tx = dma_request_chan(dev, "tx");
	host->dma_chan_rx = dma_request_chan(dev, "rx");

	spin_lock_init(&host->lock);

	mmc->ops = &moxart_ops;
	mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
	mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
	mmc->ocr_avail = 0xffff00;	/* Support 2.0v - 3.6v power. */
	mmc->max_blk_size = 2048;	/* Max. block length in REG_DATA_CONTROL */
	mmc->max_req_size = DATA_LEN_MASK;	/* bits 0-23 in REG_DATA_LENGTH */
	mmc->max_blk_count = mmc->max_req_size / 512;

	if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
		if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
		    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out;
		}
		if (!IS_ERR(host->dma_chan_tx)) {
			dma_release_channel(host->dma_chan_tx);
			host->dma_chan_tx = NULL;
		}
		if (!IS_ERR(host->dma_chan_rx)) {
			dma_release_channel(host->dma_chan_rx);
			host->dma_chan_rx = NULL;
		}
		dev_dbg(dev, "PIO mode transfer enabled\n");
		host->have_dma = false;

		mmc->max_seg_size = mmc->max_req_size;
	} else {
		dev_dbg(dev, "DMA channels found (%p,%p)\n",
			host->dma_chan_tx, host->dma_chan_rx);
		host->have_dma = true;

		memset(&cfg, 0, sizeof(cfg));
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.src_addr = 0;
		cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
		dmaengine_slave_config(host->dma_chan_tx, &cfg);

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
		cfg.dst_addr = 0;
		dmaengine_slave_config(host->dma_chan_rx, &cfg);

		mmc->max_seg_size = min3(mmc->max_req_size,
			dma_get_max_seg_size(host->dma_chan_rx->device->dev),
			dma_get_max_seg_size(host->dma_chan_tx->device->dev));
	}

	if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	writel(0, host->base + REG_INTERRUPT_MASK);

	writel(CMD_SDC_RESET, host->base + REG_COMMAND);
	for (i = 0; i < MAX_RETRIES; i++) {
		if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
			break;
		udelay(5);
	}

	ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
	if (ret)
		goto out;

	dev_set_drvdata(dev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);

	return 0;

out:
	if (!IS_ERR_OR_NULL(host->dma_chan_tx))
		dma_release_channel(host->dma_chan_tx);
	if (!IS_ERR_OR_NULL(host->dma_chan_rx))
		dma_release_channel(host->dma_chan_rx);
out_mmc:
	if (mmc)
		mmc_free_host(mmc);
	return ret;
}

static void moxart_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
	struct moxart_host *host = mmc_priv(mmc);

	if (!IS_ERR_OR_NULL(host->dma_chan_tx))
		dma_release_channel(host->dma_chan_tx);
	if (!IS_ERR_OR_NULL(host->dma_chan_rx))
		dma_release_channel(host->dma_chan_rx);
	mmc_remove_host(mmc);

	writel(0, host->base + REG_INTERRUPT_MASK);
	writel(0, host->base + REG_POWER_CONTROL);
	writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
	       host->base + REG_CLOCK_CONTROL);

	mmc_free_host(mmc);
}

static const struct of_device_id moxart_mmc_match[] = {
	{ .compatible = "moxa,moxart-mmc" },
	{ .compatible = "faraday,ftsdc010" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);

static struct platform_driver moxart_mmc_driver = {
	.probe	= moxart_probe,
	.remove	= moxart_remove,
	.driver	= {
		.name		= "mmc-moxart",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table	= moxart_mmc_match,
	},
};
module_platform_driver(moxart_mmc_driver);

MODULE_ALIAS("platform:mmc-moxart");
MODULE_DESCRIPTION("MOXA ART MMC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");