/*
 * MOXA ART MMC host driver.
 *
 * Copyright (C) 2014 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * Moxa Technologies Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>
#define REG_COMMAND		0
#define REG_ARGUMENT		4
#define REG_RESPONSE0		8
#define REG_RESPONSE1		12
#define REG_RESPONSE2		16
#define REG_RESPONSE3		20
#define REG_RESPONSE_COMMAND	24
#define REG_DATA_CONTROL	28
#define REG_DATA_TIMER		32
#define REG_DATA_LENGTH		36
#define REG_STATUS		40
#define REG_CLEAR		44
#define REG_INTERRUPT_MASK	48
#define REG_POWER_CONTROL	52
#define REG_CLOCK_CONTROL	56
#define REG_BUS_WIDTH		60
#define REG_DATA_WINDOW		64
#define REG_FEATURE		68
#define REG_REVISION		72
/* REG_COMMAND */
#define CMD_SDC_RESET		BIT(10)
#define CMD_EN			BIT(9)
#define CMD_APP_CMD		BIT(8)
#define CMD_LONG_RSP		BIT(7)
#define CMD_NEED_RSP		BIT(6)
#define CMD_IDX_MASK		0x3f
/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP		BIT(6)
#define RSP_CMD_IDX_MASK	0x3f

/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET	BIT(8)
#define DCR_DATA_THRES		BIT(7)
#define DCR_DATA_EN		BIT(6)
#define DCR_DMA_EN		BIT(5)
#define DCR_DATA_WRITE		BIT(4)
#define DCR_BLK_SIZE		0x0f

/* REG_DATA_LENGTH */
#define DATA_LEN_MASK		0xffffff
/* REG_STATUS */
#define WRITE_PROT		BIT(12)
#define CARD_DETECT		BIT(11)
/* Bits 1-10 below are shared by the status, interrupt-mask and clear registers. */
#define CARD_CHANGE		BIT(10)
#define FIFO_ORUN		BIT(9)
#define FIFO_URUN		BIT(8)
#define DATA_END		BIT(7)
#define CMD_SENT		BIT(6)
#define DATA_CRC_OK		BIT(5)
#define RSP_CRC_OK		BIT(4)
#define DATA_TIMEOUT		BIT(3)
#define RSP_TIMEOUT		BIT(2)
#define DATA_CRC_FAIL		BIT(1)
#define RSP_CRC_FAIL		BIT(0)

#define MASK_RSP		(RSP_TIMEOUT | RSP_CRC_FAIL | \
				 RSP_CRC_OK  | CARD_DETECT | CMD_SENT)

#define MASK_DATA		(DATA_CRC_OK   | DATA_END | \
				 DATA_CRC_FAIL | DATA_TIMEOUT)

#define MASK_INTR_PIO		(FIFO_URUN | FIFO_ORUN | CARD_CHANGE)
/* REG_POWER_CONTROL */
#define SD_POWER_ON		BIT(4)
#define SD_POWER_MASK		0x0f

/* REG_CLOCK_CONTROL */
#define CLK_HISPD		BIT(9)
#define CLK_OFF			BIT(8)
#define CLK_SD			BIT(7)
#define CLK_DIV_MASK		0x7f

/* REG_BUS_WIDTH */
#define BUS_WIDTH_8		BIT(2)
#define BUS_WIDTH_4		BIT(1)
#define BUS_WIDTH_1		BIT(0)

#define MMC_VDD_360		23
#define MIN_POWER		(MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES		500000
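
/*
 * Per-host driver state. One instance is allocated as mmc_priv() of the
 * mmc_host in moxart_probe() and is shared between the request path and
 * the interrupt handler, protected by ->lock.
 */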
struct moxart_host {
	spinlock_t			lock;

	void __iomem			*base;
	phys_addr_t			reg_phys;

	struct dma_chan			*dma_chan_tx;
	struct dma_chan			*dma_chan_rx;
	struct dma_async_tx_descriptor	*tx_desc;
	struct mmc_host			*mmc;
	struct mmc_request		*mrq;
	struct scatterlist		*cur_sg;
	struct completion		dma_complete;
	struct completion		pio_complete;

	u32				num_sg;
	u32				data_remain;
	u32				data_len;
	u32				fifo_width;
	u32				timeout;
	u32				rate;

	long				sysclk;

	bool				have_dma;
	bool				is_removed;
};
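
/*
 * Scatterlist walking: moxart_init_sg() points the host at the first
 * segment of a request and clamps the remaining byte count to the total
 * transfer length.
 */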
static inline void moxart_init_sg(struct moxart_host *host,
				  struct mmc_data *data)
{
	host->cur_sg = data->sg;
	host->num_sg = data->sg_len;
	host->data_remain = host->cur_sg->length;

	if (host->data_remain > host->data_len)
		host->data_remain = host->data_len;
}
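
/*
 * moxart_next_sg() advances to the next scatterlist segment and limits
 * data_remain to what is still outstanding for the request.
 */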
static inline int moxart_next_sg(struct moxart_host *host)
{
	int remain;
	struct mmc_data *data = host->mrq->cmd->data;

	host->cur_sg++;
	host->num_sg--;

	if (host->num_sg > 0) {
		host->data_remain = host->cur_sg->length;
		remain = host->data_len - data->bytes_xfered;
		if (remain > 0 && remain < host->data_remain)
			host->data_remain = remain;
	}

	return host->num_sg;
}
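
/*
 * Busy-poll REG_STATUS until one of the bits in @mask is raised, then
 * acknowledge it through REG_CLEAR. Returns 0 on success or -ETIMEDOUT
 * after MAX_RETRIES polls.
 */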
static int moxart_wait_for_status(struct moxart_host *host,
				  u32 mask, u32 *status)
{
	int ret = -ETIMEDOUT;
	u32 i;

	for (i = 0; i < MAX_RETRIES; i++) {
		*status = readl(host->base + REG_STATUS);
		if (!(*status & mask)) {
			udelay(5);
		} else {
			writel(*status & mask, host->base + REG_CLEAR);
			ret = 0;
			break;
		}
	}

	if (ret == -ETIMEDOUT)
		dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");

	return ret;
}
static void moxart_send_command(struct moxart_host *host,
				struct mmc_command *cmd)
{
	u32 status, cmdctrl;

	writel(RSP_TIMEOUT | RSP_CRC_OK |
	       RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
	writel(cmd->arg, host->base + REG_ARGUMENT);

	cmdctrl = cmd->opcode & CMD_IDX_MASK;
	if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
	    cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
	    cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
		cmdctrl |= CMD_APP_CMD;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdctrl |= CMD_NEED_RSP;
	if (cmd->flags & MMC_RSP_136)
		cmdctrl |= CMD_LONG_RSP;
	writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);

	if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
		cmd->error = -ETIMEDOUT;

	if (status & RSP_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
		return;
	}
	if (status & RSP_CRC_FAIL) {
		cmd->error = -EIO;
		return;
	}
	if (status & RSP_CRC_OK) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = readl(host->base + REG_RESPONSE0);
			cmd->resp[2] = readl(host->base + REG_RESPONSE1);
			cmd->resp[1] = readl(host->base + REG_RESPONSE2);
			cmd->resp[0] = readl(host->base + REG_RESPONSE3);
		} else {
			cmd->resp[0] = readl(host->base + REG_RESPONSE0);
		}
	}
}
static void moxart_dma_complete(void *param)
{
	struct moxart_host *host = param;

	complete(&host->dma_complete);
}
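
/*
 * DMA transfer path: map the request's scatterlist, hand it to the
 * dmaengine channel matching the transfer direction and wait (with the
 * host timeout) for the completion raised by moxart_dma_complete().
 */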
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
	u32 len, dir_slave;
	long dma_time;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	if (host->data_len == data->bytes_xfered)
		return;

	if (data->flags & MMC_DATA_WRITE) {
		dma_chan = host->dma_chan_tx;
		dir_slave = DMA_MEM_TO_DEV;
	} else {
		dma_chan = host->dma_chan_rx;
		dir_slave = DMA_DEV_TO_MEM;
	}

	len = dma_map_sg(dma_chan->device->dev, data->sg,
			 data->sg_len, mmc_get_dma_dir(data));

	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	} else {
		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
	}

	if (desc) {
		host->tx_desc = desc;
		desc->callback = moxart_dma_complete;
		desc->callback_param = host;
		dmaengine_submit(desc);
		dma_async_issue_pending(dma_chan);
	}

	data->bytes_xfered += host->data_remain;

	dma_time = wait_for_completion_interruptible_timeout(
			&host->dma_complete, host->timeout);

	dma_unmap_sg(dma_chan->device->dev,
		     data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}
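
/*
 * PIO transfer path, called from the interrupt handler on FIFO
 * under/overrun: move up to fifo_width bytes per round through
 * REG_DATA_WINDOW and signal pio_complete when the request is done
 * or has failed.
 */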
static void moxart_transfer_pio(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 *sgp, len = 0, remain, status;

	if (host->data_len == data->bytes_xfered)
		return;

	sgp = sg_virt(host->cur_sg);
	remain = host->data_remain;

	if (data->flags & MMC_DATA_WRITE) {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_URUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				iowrite32(*sgp, host->base + REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	} else {
		while (remain > 0) {
			if (moxart_wait_for_status(host, FIFO_ORUN, &status)
			    == -ETIMEDOUT) {
				data->error = -ETIMEDOUT;
				complete(&host->pio_complete);
				return;
			}
			for (len = 0; len < remain && len < host->fifo_width;) {
				/* SCR data must be read in big endian. */
				if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
					*sgp = ioread32be(host->base +
							  REG_DATA_WINDOW);
				else
					*sgp = ioread32(host->base +
							REG_DATA_WINDOW);
				sgp++;
				len += 4;
			}
			remain -= len;
		}
	}

	data->bytes_xfered += host->data_remain - remain;
	host->data_remain = remain;

	if (host->data_len != data->bytes_xfered)
		moxart_next_sg(host);
	else
		complete(&host->pio_complete);
}
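
/*
 * Program the data path for the current request: block size, total
 * length, transfer direction and (when the transfer exceeds the FIFO
 * and a channel is available) DMA mode.
 */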
static void moxart_prepare_data(struct moxart_host *host)
{
	struct mmc_data *data = host->mrq->cmd->data;
	u32 datactrl;
	int blksz_bits;

	if (!data)
		return;

	host->data_len = data->blocks * data->blksz;
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	moxart_init_sg(host, data);

	datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);

	if (data->flags & MMC_DATA_WRITE)
		datactrl |= DCR_DATA_WRITE;

	if ((host->data_len > host->fifo_width) && host->have_dma)
		datactrl |= DCR_DMA_EN;

	writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
	writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
	writel(host->rate, host->base + REG_DATA_TIMER);
	writel(host->data_len, host->base + REG_DATA_LENGTH);
	writel(datactrl, host->base + REG_DATA_CONTROL);
}
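
/*
 * mmc_host_ops .request handler: prepare and issue the command under
 * the host lock, then run the data phase via DMA or PIO (dropping the
 * lock while sleeping on the respective completion) and finish with
 * mmc_request_done().
 */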
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;
	long pio_time;

	spin_lock_irqsave(&host->lock, flags);

	init_completion(&host->dma_complete);
	init_completion(&host->pio_complete);

	host->mrq = mrq;

	if (readl(host->base + REG_STATUS) & CARD_DETECT) {
		mrq->cmd->error = -ETIMEDOUT;
		goto request_done;
	}

	moxart_prepare_data(host);
	moxart_send_command(host, host->mrq->cmd);

	if (mrq->cmd->data) {
		if ((host->data_len > host->fifo_width) && host->have_dma) {
			writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			moxart_transfer_dma(mrq->cmd->data, host);

			spin_lock_irqsave(&host->lock, flags);
		} else {
			writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);

			spin_unlock_irqrestore(&host->lock, flags);

			/* PIO transfers start from interrupt. */
			pio_time = wait_for_completion_interruptible_timeout(
					&host->pio_complete, host->timeout);

			spin_lock_irqsave(&host->lock, flags);
		}

		if (host->is_removed) {
			dev_err(mmc_dev(host->mmc), "card removed\n");
			mrq->cmd->error = -ETIMEDOUT;
			goto request_done;
		}

		if (moxart_wait_for_status(host, MASK_DATA, &status)
		    == -ETIMEDOUT) {
			mrq->cmd->data->error = -ETIMEDOUT;
			goto request_done;
		}

		if (status & DATA_CRC_FAIL)
			mrq->cmd->data->error = -ETIMEDOUT;

		if (mrq->cmd->data->stop)
			moxart_send_command(host, mrq->cmd->data->stop);
	}

request_done:
	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}
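
/*
 * Interrupt handler: card-change events update is_removed, cancel any
 * in-flight DMA and re-arm card detection; FIFO under/overrun events
 * drive the PIO transfer for the current request.
 */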
static irqreturn_t moxart_irq(int irq, void *devid)
{
	struct moxart_host *host = (struct moxart_host *)devid;
	u32 status;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	status = readl(host->base + REG_STATUS);
	if (status & CARD_CHANGE) {
		host->is_removed = status & CARD_DETECT;
		if (host->is_removed && host->have_dma) {
			dmaengine_terminate_all(host->dma_chan_tx);
			dmaengine_terminate_all(host->dma_chan_rx);
		}
		writel(MASK_INTR_PIO, host->base + REG_CLEAR);
		writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
		mmc_detect_change(host->mmc, 0);
	}
	if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
		moxart_transfer_pio(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_HANDLED;
}
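
/*
 * mmc_host_ops .set_ios handler: derive the clock divider from sysclk,
 * program card power (off, or SD_POWER_ON plus a level derived from
 * ios->vdd) and select the bus width.
 */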
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct moxart_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 power, div;
	u32 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	if (ios->clock) {
		for (div = 0; div < CLK_DIV_MASK; ++div) {
			if (ios->clock >= host->sysclk / (2 * (div + 1)))
				break;
		}
		ctrl = CLK_SD | div;
		host->rate = host->sysclk / (2 * (div + 1));
		if (host->rate > host->sysclk)
			ctrl |= CLK_HISPD;
		writel(ctrl, host->base + REG_CLOCK_CONTROL);
	}

	if (ios->power_mode == MMC_POWER_OFF) {
		writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
		       host->base + REG_POWER_CONTROL);
	} else {
		if (ios->vdd < MIN_POWER)
			power = 0;
		else
			power = ios->vdd - MIN_POWER;

		writel(SD_POWER_ON | (u32) power,
		       host->base + REG_POWER_CONTROL);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
		break;
	case MMC_BUS_WIDTH_8:
		writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
		break;
	default:
		writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
		break;
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static int moxart_get_ro(struct mmc_host *mmc)
{
	struct moxart_host *host = mmc_priv(mmc);

	return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}
static const struct mmc_host_ops moxart_ops = {
	.request = moxart_request,
	.set_ios = moxart_set_ios,
	.get_ro = moxart_get_ro,
};
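
/*
 * Probe: map the controller from the device tree, read the FIFO width
 * from REG_FEATURE, request the optional "tx"/"rx" DMA channels (falling
 * back to PIO), reset the SD controller and register the mmc host.
 */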
static int moxart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource res_mmc;
	struct mmc_host *mmc;
	struct moxart_host *host = NULL;
	struct dma_slave_config cfg;
	struct clk *clk;
	void __iomem *reg_mmc;
	int irq, ret;
	u32 i;

	mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
	if (!mmc) {
		dev_err(dev, "mmc_alloc_host failed\n");
		ret = -ENOMEM;
		goto out;
	}

	ret = of_address_to_resource(node, 0, &res_mmc);
	if (ret) {
		dev_err(dev, "of_address_to_resource failed\n");
		goto out;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		dev_err(dev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto out;
	}

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out;
	}

	reg_mmc = devm_ioremap_resource(dev, &res_mmc);
	if (IS_ERR(reg_mmc)) {
		ret = PTR_ERR(reg_mmc);
		goto out;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->base = reg_mmc;
	host->reg_phys = res_mmc.start;
	host->timeout = msecs_to_jiffies(1000);
	host->sysclk = clk_get_rate(clk);
	host->fifo_width = readl(host->base + REG_FEATURE) << 2;
	host->dma_chan_tx = dma_request_chan(dev, "tx");
	host->dma_chan_rx = dma_request_chan(dev, "rx");

	spin_lock_init(&host->lock);

	mmc->ops = &moxart_ops;
	mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
	mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
	mmc->ocr_avail = 0xffff00;	/* Support 2.0v - 3.6v power. */

	if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
		if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
		    PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out;
		}
		dev_dbg(dev, "PIO mode transfer enabled\n");
		host->have_dma = false;
	} else {
		dev_dbg(dev, "DMA channels found (%p,%p)\n",
			host->dma_chan_tx, host->dma_chan_rx);
		host->have_dma = true;

		memset(&cfg, 0, sizeof(cfg));
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.src_addr = 0;
		cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
		dmaengine_slave_config(host->dma_chan_tx, &cfg);

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
		cfg.dst_addr = 0;
		dmaengine_slave_config(host->dma_chan_rx, &cfg);
	}

	switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
	case 1:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
		break;
	case 2:
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		break;
	default:
		break;
	}

	writel(0, host->base + REG_INTERRUPT_MASK);

	writel(CMD_SDC_RESET, host->base + REG_COMMAND);
	for (i = 0; i < MAX_RETRIES; i++) {
		if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
			break;
		udelay(5);
	}

	ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
	if (ret)
		goto out;

	dev_set_drvdata(dev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);

	return 0;

out:
	if (host && !IS_ERR_OR_NULL(host->dma_chan_tx))
		dma_release_channel(host->dma_chan_tx);
	if (host && !IS_ERR_OR_NULL(host->dma_chan_rx))
		dma_release_channel(host->dma_chan_rx);
	if (mmc)
		mmc_free_host(mmc);
	return ret;
}
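
/*
 * Remove: release the DMA channels, unregister the mmc host and put the
 * controller into a quiescent state (interrupts masked, power and clock
 * off).
 */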
static int moxart_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
	struct moxart_host *host = mmc_priv(mmc);

	dev_set_drvdata(&pdev->dev, NULL);

	if (!IS_ERR(host->dma_chan_tx))
		dma_release_channel(host->dma_chan_tx);
	if (!IS_ERR(host->dma_chan_rx))
		dma_release_channel(host->dma_chan_rx);
	mmc_remove_host(mmc);

	writel(0, host->base + REG_INTERRUPT_MASK);
	writel(0, host->base + REG_POWER_CONTROL);
	writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
	       host->base + REG_CLOCK_CONTROL);

	mmc_free_host(mmc);

	return 0;
}
static const struct of_device_id moxart_mmc_match[] = {
	{ .compatible = "moxa,moxart-mmc" },
	{ .compatible = "faraday,ftsdc010" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);
static struct platform_driver moxart_mmc_driver = {
	.probe      = moxart_probe,
	.remove     = moxart_remove,
	.driver     = {
		.name		= "mmc-moxart",
		.of_match_table	= moxart_mmc_match,
	},
};
module_platform_driver(moxart_mmc_driver);
MODULE_ALIAS("platform:mmc-moxart");
MODULE_DESCRIPTION("MOXA ART MMC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");