/*
 * MOXA ART MMC host driver.
 *
 * Copyright (C) 2014 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 * Moxa Technologies Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>

#define REG_ARGUMENT 4
#define REG_RESPONSE0 8
#define REG_RESPONSE1 12
#define REG_RESPONSE2 16
#define REG_RESPONSE3 20
#define REG_RESPONSE_COMMAND 24
#define REG_DATA_CONTROL 28
#define REG_DATA_TIMER 32
#define REG_DATA_LENGTH 36
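
/*
 * REG_COMMAND, REG_STATUS and REG_CLEAR are referenced by the code below but
 * are not defined in this excerpt; the offsets here are an assumption based
 * on the controller's contiguous 4-byte register layout.
 */
#define REG_COMMAND 0
#define REG_STATUS 40
#define REG_CLEAR 44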
#define REG_INTERRUPT_MASK 48
#define REG_POWER_CONTROL 52
#define REG_CLOCK_CONTROL 56
#define REG_BUS_WIDTH 60
#define REG_DATA_WINDOW 64
#define REG_FEATURE 68
#define REG_REVISION 72

#define CMD_SDC_RESET BIT(10)
#define CMD_APP_CMD BIT(8)
#define CMD_LONG_RSP BIT(7)
#define CMD_NEED_RSP BIT(6)
#define CMD_IDX_MASK 0x3f
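/* Command-enable bit for REG_COMMAND, used below; BIT(9) is an assumption
 * based on the surrounding bit layout, not taken from this excerpt. */
#define CMD_EN BIT(9)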

/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP BIT(6)
#define RSP_CMD_IDX_MASK 0x3f

/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET BIT(8)
#define DCR_DATA_THRES BIT(7)
#define DCR_DATA_EN BIT(6)
#define DCR_DMA_EN BIT(5)
#define DCR_DATA_WRITE BIT(4)
#define DCR_BLK_SIZE 0x0f

#define DATA_LEN_MASK 0xffffff

#define WRITE_PROT BIT(12)
#define CARD_DETECT BIT(11)
/* Bits 1-10 below can be sent to either register, interrupt or clear. */
#define CARD_CHANGE BIT(10)
#define FIFO_ORUN BIT(9)
#define FIFO_URUN BIT(8)
#define DATA_END BIT(7)
#define CMD_SENT BIT(6)
#define DATA_CRC_OK BIT(5)
#define RSP_CRC_OK BIT(4)
#define DATA_TIMEOUT BIT(3)
#define RSP_TIMEOUT BIT(2)
#define DATA_CRC_FAIL BIT(1)
#define RSP_CRC_FAIL BIT(0)

#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \
                  RSP_CRC_OK | CARD_DETECT | CMD_SENT)

#define MASK_DATA (DATA_CRC_OK | DATA_END | \
                   DATA_CRC_FAIL | DATA_TIMEOUT)

#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE)

/* REG_POWER_CONTROL */
#define SD_POWER_ON BIT(4)
#define SD_POWER_MASK 0x0f

/* REG_CLOCK_CONTROL */
#define CLK_HISPD BIT(9)
#define CLK_OFF BIT(8)
#define CLK_SD BIT(7)
#define CLK_DIV_MASK 0x7f

#define BUS_WIDTH_8 BIT(2)
#define BUS_WIDTH_4 BIT(1)
#define BUS_WIDTH_1 BIT(0)

#define MMC_VDD_360 23
#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES 500000
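
/*
 * The fields below belong to the per-controller state, struct moxart_host.
 * The full structure also carries the register base, clock and scatterlist
 * bookkeeping, and the spinlock used by the functions in this file (all of
 * them accessed through host->).
 */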
        phys_addr_t reg_phys;

        struct dma_chan *dma_chan_tx;
        struct dma_chan *dma_chan_rx;
        struct dma_async_tx_descriptor *tx_desc;
        struct mmc_host *mmc;
        struct mmc_request *mrq;
        struct scatterlist *cur_sg;
        struct completion dma_complete;
        struct completion pio_complete;

static inline void moxart_init_sg(struct moxart_host *host,
                                  struct mmc_data *data)
{
        host->cur_sg = data->sg;
        host->num_sg = data->sg_len;
        host->data_remain = host->cur_sg->length;

        if (host->data_remain > host->data_len)
                host->data_remain = host->data_len;
}
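
/*
 * Advance to the next scatterlist entry and clamp the chunk size to the
 * bytes still outstanding for the request; returns the number of entries
 * left.
 */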
static inline int moxart_next_sg(struct moxart_host *host)
{
        int remain;
        struct mmc_data *data = host->mrq->cmd->data;

        host->cur_sg++;
        host->num_sg--;

        if (host->num_sg > 0) {
                host->data_remain = host->cur_sg->length;
                remain = host->data_len - data->bytes_xfered;
                if (remain > 0 && remain < host->data_remain)
                        host->data_remain = remain;
        }

        return host->num_sg;
}
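
/*
 * Busy-poll REG_STATUS until one of the bits in @mask is raised, then
 * acknowledge it through REG_CLEAR. Gives up after MAX_RETRIES polls.
 */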
static int moxart_wait_for_status(struct moxart_host *host,
                                  u32 mask, u32 *status)
{
        int ret = -ETIMEDOUT;
        u32 i;

        for (i = 0; i < MAX_RETRIES; i++) {
                *status = readl(host->base + REG_STATUS);
                if (!(*status & mask)) {
                        udelay(5);
                        continue;
                }
                writel(*status & mask, host->base + REG_CLEAR);
                ret = 0;
                break;
        }

        if (ret == -ETIMEDOUT)
                dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

        return ret;
}
static void moxart_send_command(struct moxart_host *host,
        struct mmc_command *cmd)
{
        u32 status, cmdctrl;

        writel(RSP_TIMEOUT | RSP_CRC_OK |
               RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
        writel(cmd->arg, host->base + REG_ARGUMENT);

        cmdctrl = cmd->opcode & CMD_IDX_MASK;
        if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
            cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
            cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
                cmdctrl |= CMD_APP_CMD;

        if (cmd->flags & MMC_RSP_PRESENT)
                cmdctrl |= CMD_NEED_RSP;

        if (cmd->flags & MMC_RSP_136)
                cmdctrl |= CMD_LONG_RSP;

        writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);

        if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
                cmd->error = -ETIMEDOUT;

        if (status & RSP_TIMEOUT) {
                cmd->error = -ETIMEDOUT;
                return;
        }
        if (status & RSP_CRC_FAIL) {
                cmd->error = -EIO;
                return;
        }
        if (status & RSP_CRC_OK) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[3] = readl(host->base + REG_RESPONSE0);
                        cmd->resp[2] = readl(host->base + REG_RESPONSE1);
                        cmd->resp[1] = readl(host->base + REG_RESPONSE2);
                        cmd->resp[0] = readl(host->base + REG_RESPONSE3);
                } else {
                        cmd->resp[0] = readl(host->base + REG_RESPONSE0);
                }
        }
}
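
/* dmaengine callback: wake the request thread blocked in moxart_transfer_dma(). */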
static void moxart_dma_complete(void *param)
{
        struct moxart_host *host = param;

        complete(&host->dma_complete);
}
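
/*
 * Map the scatterlist for the dmaengine channel matching the transfer
 * direction, submit a slave transfer and wait (with a timeout) for the
 * completion callback before unmapping.
 */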
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
        u32 len, dir_data, dir_slave;
        long dma_time;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *dma_chan;

        if (host->data_len == data->bytes_xfered)
                return;

        if (data->flags & MMC_DATA_WRITE) {
                dma_chan = host->dma_chan_tx;
                dir_data = DMA_TO_DEVICE;
                dir_slave = DMA_MEM_TO_DEV;
        } else {
                dma_chan = host->dma_chan_rx;
                dir_data = DMA_FROM_DEVICE;
                dir_slave = DMA_DEV_TO_MEM;
        }

        len = dma_map_sg(dma_chan->device->dev, data->sg,
                         data->sg_len, dir_data);

        if (len > 0) {
                desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
                                               len, dir_slave,
                                               DMA_PREP_INTERRUPT |
                                               DMA_CTRL_ACK);
        } else {
                dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
        }

        if (desc) {
                host->tx_desc = desc;
                desc->callback = moxart_dma_complete;
                desc->callback_param = host;
                dmaengine_submit(desc);
                dma_async_issue_pending(dma_chan);
        }

        data->bytes_xfered += host->data_remain;

        dma_time = wait_for_completion_interruptible_timeout(
                        &host->dma_complete, host->timeout);

        dma_unmap_sg(dma_chan->device->dev,
                     data->sg, data->sg_len,
                     dir_data);
}
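
/*
 * Polling transfer path: wait for FIFO underrun/overrun status and move up
 * to fifo_width bytes at a time through REG_DATA_WINDOW. SCR data is read
 * big endian; pio_complete is signalled once the whole request has moved.
 */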
static void moxart_transfer_pio(struct moxart_host *host)
{
        struct mmc_data *data = host->mrq->cmd->data;
        u32 *sgp, len = 0, remain, status;

        if (host->data_len == data->bytes_xfered)
                return;

        sgp = sg_virt(host->cur_sg);
        remain = host->data_remain;

        if (data->flags & MMC_DATA_WRITE) {
                while (remain > 0) {
                        if (moxart_wait_for_status(host, FIFO_URUN, &status)
                            == -ETIMEDOUT) {
                                data->error = -ETIMEDOUT;
                                complete(&host->pio_complete);
                                return;
                        }
                        for (len = 0; len < remain && len < host->fifo_width;) {
                                iowrite32(*sgp, host->base + REG_DATA_WINDOW);
                                sgp++;
                                len += 4;
                        }
                        remain -= len;
                }
        } else {
                while (remain > 0) {
                        if (moxart_wait_for_status(host, FIFO_ORUN, &status)
                            == -ETIMEDOUT) {
                                data->error = -ETIMEDOUT;
                                complete(&host->pio_complete);
                                return;
                        }
                        for (len = 0; len < remain && len < host->fifo_width;) {
                                /* SCR data must be read in big endian. */
                                if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
                                        *sgp = ioread32be(host->base +
                                                          REG_DATA_WINDOW);
                                else
                                        *sgp = ioread32(host->base +
                                                        REG_DATA_WINDOW);
                                sgp++;
                                len += 4;
                        }
                        remain -= len;
                }
        }

        data->bytes_xfered += host->data_remain - remain;
        host->data_remain = remain;

        if (host->data_len != data->bytes_xfered)
                moxart_next_sg(host);
        else
                complete(&host->pio_complete);
}
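
/*
 * Program the data timer, length and control registers for the upcoming
 * transfer, selecting DMA only when the request exceeds the FIFO width.
 */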
static void moxart_prepare_data(struct moxart_host *host)
{
        struct mmc_data *data = host->mrq->cmd->data;
        u32 datactrl;
        int blksz_bits;

        if (!data)
                return;

        host->data_len = data->blocks * data->blksz;
        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        moxart_init_sg(host, data);

        datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

        if (data->flags & MMC_DATA_WRITE)
                datactrl |= DCR_DATA_WRITE;

        if ((host->data_len > host->fifo_width) && host->have_dma)
                datactrl |= DCR_DMA_EN;

        writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
        writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
        writel(host->rate, host->base + REG_DATA_TIMER);
        writel(host->data_len, host->base + REG_DATA_LENGTH);
        writel(datactrl, host->base + REG_DATA_CONTROL);
}
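
/*
 * Issue the command and run any data phase: large requests go through the
 * DMA path with the lock dropped around the blocking wait, everything else
 * is PIO driven from the FIFO interrupts. Data status is then checked and a
 * stop command sent before the request is completed.
 */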
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct moxart_host *host = mmc_priv(mmc);
        long pio_time;
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&host->lock, flags);

        init_completion(&host->dma_complete);
        init_completion(&host->pio_complete);

        host->mrq = mrq;

        if (readl(host->base + REG_STATUS) & CARD_DETECT) {
                mrq->cmd->error = -ETIMEDOUT;
                goto done;
        }

        moxart_prepare_data(host);
        moxart_send_command(host, host->mrq->cmd);

        if (mrq->cmd->data) {
                if ((host->data_len > host->fifo_width) && host->have_dma) {

                        writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);

                        spin_unlock_irqrestore(&host->lock, flags);

                        moxart_transfer_dma(mrq->cmd->data, host);

                        spin_lock_irqsave(&host->lock, flags);
                } else {

                        writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);

                        spin_unlock_irqrestore(&host->lock, flags);

                        /* PIO transfers start from interrupt. */
                        pio_time = wait_for_completion_interruptible_timeout(
                                        &host->pio_complete, host->timeout);

                        spin_lock_irqsave(&host->lock, flags);
                }

                if (host->is_removed) {
                        dev_err(mmc_dev(host->mmc), "card removed\n");
                        mrq->cmd->error = -ETIMEDOUT;
                        goto done;
                }

                if (moxart_wait_for_status(host, MASK_DATA, &status)
                    == -ETIMEDOUT) {
                        mrq->cmd->data->error = -ETIMEDOUT;
                        goto done;
                }

                if (status & DATA_CRC_FAIL)
                        mrq->cmd->data->error = -ETIMEDOUT;

                if (mrq->cmd->data->stop)
                        moxart_send_command(host, mrq->cmd->data->stop);
        }

done:
        spin_unlock_irqrestore(&host->lock, flags);
        mmc_request_done(host->mmc, mrq);
}
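
/*
 * Interrupt handler: CARD_CHANGE records insertion/removal (tearing down the
 * DMA channels on removal) and re-arms card-change detection; FIFO
 * underrun/overrun feeds the PIO transfer path.
 */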
static irqreturn_t moxart_irq(int irq, void *devid)
{
        struct moxart_host *host = (struct moxart_host *)devid;
        u32 status;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        status = readl(host->base + REG_STATUS);
        if (status & CARD_CHANGE) {
                host->is_removed = status & CARD_DETECT;
                if (host->is_removed && host->have_dma) {
                        dmaengine_terminate_all(host->dma_chan_tx);
                        dmaengine_terminate_all(host->dma_chan_rx);
                }

                writel(MASK_INTR_PIO, host->base + REG_CLEAR);
                writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
                mmc_detect_change(host->mmc, 0);
        }
        if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
                moxart_transfer_pio(host);

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_HANDLED;
}
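
/*
 * Apply the core's ios request: derive the clock divider from sysclk, switch
 * card power on or off and select the 1/4/8-bit bus width.
 */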
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct moxart_host *host = mmc_priv(mmc);
        unsigned long flags;
        u8 power, div;
        u32 ctrl;

        spin_lock_irqsave(&host->lock, flags);

        if (ios->clock) {
                for (div = 0; div < CLK_DIV_MASK; ++div) {
                        if (ios->clock >= host->sysclk / (2 * (div + 1)))
                                break;
                }
                ctrl = CLK_SD | div;
                host->rate = host->sysclk / (2 * (div + 1));
                if (host->rate > host->sysclk)
                        ctrl |= CLK_HISPD;
                writel(ctrl, host->base + REG_CLOCK_CONTROL);
        }

        if (ios->power_mode == MMC_POWER_OFF) {
                writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
                       host->base + REG_POWER_CONTROL);
        } else {
                if (ios->vdd < MIN_POWER)
                        power = 0;
                else
                        power = ios->vdd - MIN_POWER;

                writel(SD_POWER_ON | (u32) power,
                       host->base + REG_POWER_CONTROL);
        }

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
                break;
        case MMC_BUS_WIDTH_8:
                writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
                break;
        default:
                writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
                break;
        }

        spin_unlock_irqrestore(&host->lock, flags);
}
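
/* The write-protect switch state is reported directly by REG_STATUS. */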
static int moxart_get_ro(struct mmc_host *mmc)
{
        struct moxart_host *host = mmc_priv(mmc);

        return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}

static struct mmc_host_ops moxart_ops = {
        .request = moxart_request,
        .set_ios = moxart_set_ios,
        .get_ro = moxart_get_ro,
};
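
/*
 * Probe: map the controller, obtain clock and IRQ, optionally set up the two
 * slave DMA channels against REG_DATA_WINDOW, reset the controller and
 * register the MMC host.
 */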
static int moxart_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource res_mmc;
        struct mmc_host *mmc;
        struct moxart_host *host = NULL;
        struct dma_slave_config cfg;
        struct clk *clk;
        void __iomem *reg_mmc;
        int irq, ret;
        u32 i;

        mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
        if (!mmc) {
                dev_err(dev, "mmc_alloc_host failed\n");
                ret = -ENOMEM;
                goto out;
        }

        ret = of_address_to_resource(node, 0, &res_mmc);
        if (ret) {
                dev_err(dev, "of_address_to_resource failed\n");
                goto out;
        }

        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                dev_err(dev, "irq_of_parse_and_map failed\n");
                ret = -EINVAL;
                goto out;
        }

        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                goto out;
        }

        reg_mmc = devm_ioremap_resource(dev, &res_mmc);
        if (IS_ERR(reg_mmc)) {
                ret = PTR_ERR(reg_mmc);
                goto out;
        }

        ret = mmc_of_parse(mmc);
        if (ret)
                goto out;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->base = reg_mmc;
        host->reg_phys = res_mmc.start;
        host->timeout = msecs_to_jiffies(1000);
        host->sysclk = clk_get_rate(clk);
        host->fifo_width = readl(host->base + REG_FEATURE) << 2;
        host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
        host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");

        spin_lock_init(&host->lock);

        mmc->ops = &moxart_ops;
        mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
        mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
        mmc->ocr_avail = 0xffff00;      /* Support 2.0v - 3.6v power. */

        if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
                if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
                    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
                        goto out;
                }
                dev_dbg(dev, "PIO mode transfer enabled\n");
                host->have_dma = false;
        } else {
                dev_dbg(dev, "DMA channels found (%p,%p)\n",
                        host->dma_chan_tx, host->dma_chan_rx);
                host->have_dma = true;

                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.src_addr = 0;
                cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
                dmaengine_slave_config(host->dma_chan_tx, &cfg);

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
                cfg.dst_addr = 0;
                dmaengine_slave_config(host->dma_chan_rx, &cfg);
        }

        switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
        case 1:
                mmc->caps |= MMC_CAP_4_BIT_DATA;
                break;
        case 2:
                mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
                break;
        default:
                break;
        }

        writel(0, host->base + REG_INTERRUPT_MASK);

        writel(CMD_SDC_RESET, host->base + REG_COMMAND);
        for (i = 0; i < MAX_RETRIES; i++) {
                if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
                        break;
                udelay(5);
        }

        ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
        if (ret)
                goto out;

        dev_set_drvdata(dev, mmc);
        mmc_add_host(mmc);

        dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);

        return 0;

out:
        if (mmc)
                mmc_free_host(mmc);
        return ret;
}
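
/*
 * Remove: release the DMA channels, unregister the host and quiesce the
 * controller (interrupts masked, power off, clock gated).
 */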
static int moxart_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
        struct moxart_host *host = mmc_priv(mmc);

        dev_set_drvdata(&pdev->dev, NULL);

        if (mmc) {
                if (!IS_ERR(host->dma_chan_tx))
                        dma_release_channel(host->dma_chan_tx);
                if (!IS_ERR(host->dma_chan_rx))
                        dma_release_channel(host->dma_chan_rx);
                mmc_remove_host(mmc);
                mmc_free_host(mmc);

                writel(0, host->base + REG_INTERRUPT_MASK);
                writel(0, host->base + REG_POWER_CONTROL);
                writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
                       host->base + REG_CLOCK_CONTROL);
        }
        return 0;
}

static const struct of_device_id moxart_mmc_match[] = {
        { .compatible = "moxa,moxart-mmc" },
        { .compatible = "faraday,ftsdc010" },
        { }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);

static struct platform_driver moxart_mmc_driver = {
        .probe = moxart_probe,
        .remove = moxart_remove,
        .driver = {
                .name = "mmc-moxart",
                .of_match_table = moxart_mmc_match,
        },
};
module_platform_driver(moxart_mmc_driver);

MODULE_ALIAS("platform:mmc-moxart");
MODULE_DESCRIPTION("MOXA ART MMC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");