/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | \
	 (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

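/*
 * Editorial note with illustrative arithmetic (not part of the original
 * driver): the descriptor ring lives in one page, so assuming PAGE_SIZE =
 * 4096 the ring holds 4096 / 32 = 128 idmac_desc_64addr entries (32 bytes
 * each) or 4096 / 16 = 256 idmac_desc entries (16 bytes each). Since every
 * descriptor moves at most DW_MCI_DESC_DATA_LENGTH (4 KiB), one full ring
 * covers 512 KiB (64-bit descriptors) or 1 MiB (32-bit) per transfer.
 */
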
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
static int dw_mci_get_cd(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

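/*
 * Worked example (illustrative only, not from the original source): for a
 * multi-block read (MMC_READ_MULTIPLE_BLOCK) the flags composed above come
 * out as SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP | SDMMC_CMD_RESP_CRC |
 * SDMMC_CMD_DAT_EXP (a read, so no SDMMC_CMD_DAT_WR), plus
 * SDMMC_CMD_USE_HOLD_REG unless the slot opted out via
 * DW_MMC_CARD_NO_USE_HOLD.
 */
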
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

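/*
 * Illustrative decoding of the SD_IO_RW_DIRECT abort built above (editorial
 * note): bit 31 selects a write, bits 30:28 address function 0 (the CCCR),
 * bits 25:9 carry the register address (SDIO_CCCR_ABORT), and the data byte
 * in the low bits is the number of the function being aborted, recovered
 * from bits 30:28 of the original CMD53 argument.
 */
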
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

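/*
 * Illustrative layout note (editorial, not from the original source): with a
 * ring of N descriptors starting at sg_dma, descriptor i is forward-linked
 * to sg_dma + (i + 1) * sizeof(desc), and the last descriptor points back
 * at sg_dma with IDMAC_DES0_ER set, so the IDMAC sees a closed circular
 * chain whose base address is programmed into DBADDR (or DBADDRL/DBADDRU).
 */
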
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

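/*
 * Worked example (illustrative only): a single 10 KiB scatterlist entry is
 * split into three descriptors of 4 KiB + 4 KiB + 2 KiB, since each
 * descriptor carries at most DW_MCI_DESC_DATA_LENGTH bytes. Only the first
 * descriptor of the whole chain gets IDMAC_DES0_FD and only the last gets
 * IDMAC_DES0_LD, with chaining (CH) and interrupt-disable (DIC) cleared on
 * the final one so the IDMAC raises its completion interrupt there.
 */
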
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	unsigned long timeout;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			timeout = jiffies + msecs_to_jiffies(100);
			while (readl(&desc->des0) &
			       cpu_to_le32(IDMAC_DES0_OWN)) {
				if (time_after(jiffies, timeout))
					goto err_own_bit;
				udelay(10);
			}

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}

		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

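/*
 * Worked example (illustrative, assuming a 64-word FIFO and data_shift = 2,
 * i.e. a 32-bit FIFO): blksz = 512 gives blksz_depth = 128, tx_wmark = 32
 * and tx_wmark_invers = 32. Scanning mszs[] downward, idx = 4 (burst size
 * 32) is the first where 128 % 32 == 0 and 32 % 32 == 0, so msize = 4 and
 * rx_wmark = 31, and FIFOTH becomes SDMMC_SET_FIFOTH(4, 31, 32).
 */
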
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing == MMC_TIMING_MMC_HS400)
		return;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

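/*
 * Worked example (illustrative only, assuming fifo_depth = 128 and a 32-bit
 * FIFO, data_shift = 2): in HS200, blksz = 512 gives blksz_depth = 128,
 * which fits the FIFO, so CDTHRCTL is programmed with thld_size = 512 and
 * the read-threshold enable bit; any block deeper than the FIFO falls
 * through to the disable path.
 */
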
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

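/*
 * Worked example (illustrative only): with bus_hz = 50 MHz and a requested
 * clock of 400 kHz, div = 50000000 / 400000 = 125 (no remainder, so no
 * + 1), then DIV_ROUND_UP(125, 2) = 63 is written to CLKDIV. The controller
 * divides by 2 * CLKDIV, so the actual card clock is 50 MHz / 126, roughly
 * 396.8 kHz - always at or below the requested rate, never above it.
 */
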
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;
			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
	    !mmc_card_is_removable(mmc)) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

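/*
 * Editorial note: for a 136-bit (R2) response the controller returns the
 * most-significant word in RESP3, so the mapping above is deliberately
 * reversed - e.g. for a CSD read, resp[0] holds bits 127:96 (from RESP3)
 * and resp[3] holds bits 31:0 (from RESP0), matching what the mmc core
 * expects.
 */
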
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a bit spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

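/*
 * Worked example (illustrative only): with TMOUT = 0xFFFFFFFF as programmed
 * in __dw_mci_start_request(), the data-timeout field is drto_clks =
 * 0xFFFFFF = 16777215 clocks. At bus_hz = 100 MHz that is
 * DIV_ROUND_UP(16777215, 100000) = 168 ms, so the software timer fires
 * after 168 + 10 = 178 ms if the hardware DTO interrupt never arrives.
 */
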
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If data error interrupt comes but data over
				 * interrupt doesn't come within the given time
				 * in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
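/*
 * Example: with a 32-bit FIFO (host->pull_data == dw_mci_pull_data32)
 * and 7 bytes left in a transfer, dw_mci_pull_data() first drains any
 * bytes staged in part_buf, then reads one full 32-bit word for the
 * remaining group of four, and the final 3 bytes are read into
 * part_buf32 and copied out by dw_mci_pull_final_bytes().
 */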
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
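/*
 * fcnt above converts the FIFO fill level to bytes: SDMMC_GET_FCNT()
 * returns the count in FIFO words, data_shift is log2 of the FIFO word
 * size, and part_buf_count adds the bytes already staged. E.g. with a
 * 32-bit FIFO (shift = 2), 8 words in the FIFO and 1 staged byte,
 * fcnt = (8 << 2) + 1 = 33 bytes available to pull.
 */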
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
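/*
 * The TX math above mirrors the RX side: free space is (fifo_depth
 * minus the current fill level) in FIFO words, shifted up to bytes,
 * less any bytes already staged in part_buf, so a push can never
 * overrun the FIFO.
 */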
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			unsigned long irqflags;

			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
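/*
 * dw_mci_interrupt() is a classic "ack then defer" top half: MINTSTS
 * holds the masked interrupt status, each handled source is
 * acknowledged by writing its bit back to RINTSTS, and all non-trivial
 * work apart from draining the FIFO is deferred to the tasklet via
 * host->pending_events. IDMAC status is reported and acknowledged
 * separately through IDSTS/IDSTS64.
 */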
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		dev_info(host->dev,
			 "'clock-freq-min-max' property was deprecated.\n");
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default.
	 * It needs to use trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
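/*
 * A worked example for the IDMAC limits above (an illustration, not a
 * guarantee: it assumes a 4 KiB PAGE_SIZE, i.e. DESC_RING_BUF_SZ, and
 * the 16-byte 32-bit descriptor, giving ring_size = 4096 / 16 = 256):
 * max_req_size = 0x1000 * 256 = 1 MiB per request, and
 * max_blk_count = 1 MiB / 512 = 2048 blocks.
 */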
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL)))
			goto no_dma;

		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
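/*
 * Note that the descriptor ring above is allocated with
 * dmam_alloc_coherent(), the device-managed variant, so it is freed
 * automatically when the device goes away and needs no explicit free
 * in dw_mci_remove().
 */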
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If the DTO interrupt does NOT come in the sending-data
		 * state, we should notify the driver to terminate the
		 * current transfer and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}
#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find the reset controller if one exists */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
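/*
 * A minimal sketch of a device-tree node exercising the properties
 * parsed above (the addresses, clock specifiers and values here are
 * illustrative only, not from a real board file):
 *
 *	mmc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		reg = <0x12200000 0x1000>;
 *		clocks = <&clock 351>, <&clock 132>;
 *		clock-names = "biu", "ciu";
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *	};
 */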
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;
	int i;
	struct dw_mci_slot *slot;

	/*
	 * The CD interrupt is not needed when a slot uses polling
	 * (MMC_CAP_NEEDS_POLL, e.g. broken-cd) or when every slot has a
	 * working card-detect GPIO.
	 */
	for (i = 0; i < host->num_slots; i++) {
		slot = host->slot[i];
		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
			return;

		if (mmc_gpio_get_cd(slot->mmc) < 0)
			break;
	}
	if (i == host->num_slots)
		return;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (!IS_ERR(host->pdata->rstc)) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	setup_timer(&host->cmd11_timer,
		    dw_mci_cmd11_timer, (unsigned long)host);

	setup_timer(&host->dto_timer,
		    dw_mci_dto_timer, (unsigned long)host);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
	 * TX mark = fifo_size / 2, DMA size = 8.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data offset changed in the 2.40a spec, so check the version-id
	 * and set the data offset for the DATA register accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = 1;

	if (host->num_slots < 1 ||
	    host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
		dev_err(host->dev,
			"Platform data must supply correct num_slots.\n");
		ret = -ENODEV;
		goto err_dmaunmap;
	}

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready, and errors such as transmit/receive timeout and
	 * CRC error.
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
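/*
 * A minimal sketch of the platform glue that consumes dw_mci_probe()
 * (modeled on the separate dw_mmc-pltfm helper; the function name is
 * illustrative). The glue's only job is to fill in dev, regs, irq and
 * irq_flags before handing the host off:
 */
#if 0	/* illustrative only, not compiled */
static int foo_dw_mci_probe(struct platform_device *pdev)
{
	struct dw_mci *host;
	struct resource *regs;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0)
		return host->irq;

	host->dev = &pdev->dev;
	host->irq_flags = 0;
	host->pdata = pdev->dev.platform_data;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	platform_set_drvdata(pdev, host);
	return dw_mci_probe(host);
}
#endif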
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
int dw_mci_runtime_resume(struct device *dev)
{
	int i, ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz with zero.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->cur_slot &&
	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
	     !mmc_card_is_removable(host->cur_slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */
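/*
 * A minimal sketch of how glue drivers are expected to wire up the two
 * runtime PM callbacks above (the struct name is illustrative):
 */
#if 0	/* illustrative only, not compiled */
static const struct dev_pm_ops foo_dw_mci_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
			   dw_mci_runtime_resume, NULL)
};
#endif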
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");