2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/iopoll.h>
23 #include <linux/ioport.h>
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/card.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/sdio.h>
37 #include <linux/bitops.h>
38 #include <linux/regulator/consumer.h>
40 #include <linux/of_gpio.h>
41 #include <linux/mmc/slot-gpio.h>
45 /* Common flag combinations */
46 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
47 SDMMC_INT_HTO | SDMMC_INT_SBE | \
48 SDMMC_INT_EBE | SDMMC_INT_HLE)
49 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
50 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
51 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
52 DW_MCI_CMD_ERROR_FLAGS)
53 #define DW_MCI_SEND_STATUS 1
54 #define DW_MCI_RECV_STATUS 2
55 #define DW_MCI_DMA_THRESHOLD 16
57 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
58 #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
60 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
61 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
62 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
65 #define DESC_RING_BUF_SZ PAGE_SIZE
67 struct idmac_desc_64addr
{
68 u32 des0
; /* Control Descriptor */
69 #define IDMAC_OWN_CLR64(x) \
70 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
72 u32 des1
; /* Reserved */
74 u32 des2
; /*Buffer sizes */
75 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
76 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
77 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
79 u32 des3
; /* Reserved */
81 u32 des4
; /* Lower 32-bits of Buffer Address Pointer 1*/
82 u32 des5
; /* Upper 32-bits of Buffer Address Pointer 1*/
84 u32 des6
; /* Lower 32-bits of Next Descriptor Address */
85 u32 des7
; /* Upper 32-bits of Next Descriptor Address */
89 __le32 des0
; /* Control Descriptor */
90 #define IDMAC_DES0_DIC BIT(1)
91 #define IDMAC_DES0_LD BIT(2)
92 #define IDMAC_DES0_FD BIT(3)
93 #define IDMAC_DES0_CH BIT(4)
94 #define IDMAC_DES0_ER BIT(5)
95 #define IDMAC_DES0_CES BIT(30)
96 #define IDMAC_DES0_OWN BIT(31)
98 __le32 des1
; /* Buffer sizes */
99 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
100 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
102 __le32 des2
; /* buffer 1 physical address */
104 __le32 des3
; /* buffer 2 physical address */
107 /* Each descriptor can transfer up to 4KB of data in chained mode */
108 #define DW_MCI_DESC_DATA_LENGTH 0x1000
110 #if defined(CONFIG_DEBUG_FS)
111 static int dw_mci_req_show(struct seq_file
*s
, void *v
)
113 struct dw_mci_slot
*slot
= s
->private;
114 struct mmc_request
*mrq
;
115 struct mmc_command
*cmd
;
116 struct mmc_command
*stop
;
117 struct mmc_data
*data
;
119 /* Make sure we get a consistent snapshot */
120 spin_lock_bh(&slot
->host
->lock
);
130 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
131 cmd
->opcode
, cmd
->arg
, cmd
->flags
,
132 cmd
->resp
[0], cmd
->resp
[1], cmd
->resp
[2],
133 cmd
->resp
[2], cmd
->error
);
135 seq_printf(s
, "DATA %u / %u * %u flg %x err %d\n",
136 data
->bytes_xfered
, data
->blocks
,
137 data
->blksz
, data
->flags
, data
->error
);
140 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
141 stop
->opcode
, stop
->arg
, stop
->flags
,
142 stop
->resp
[0], stop
->resp
[1], stop
->resp
[2],
143 stop
->resp
[2], stop
->error
);
146 spin_unlock_bh(&slot
->host
->lock
);
151 static int dw_mci_req_open(struct inode
*inode
, struct file
*file
)
153 return single_open(file
, dw_mci_req_show
, inode
->i_private
);
156 static const struct file_operations dw_mci_req_fops
= {
157 .owner
= THIS_MODULE
,
158 .open
= dw_mci_req_open
,
161 .release
= single_release
,
164 static int dw_mci_regs_show(struct seq_file
*s
, void *v
)
166 struct dw_mci
*host
= s
->private;
168 seq_printf(s
, "STATUS:\t0x%08x\n", mci_readl(host
, STATUS
));
169 seq_printf(s
, "RINTSTS:\t0x%08x\n", mci_readl(host
, RINTSTS
));
170 seq_printf(s
, "CMD:\t0x%08x\n", mci_readl(host
, CMD
));
171 seq_printf(s
, "CTRL:\t0x%08x\n", mci_readl(host
, CTRL
));
172 seq_printf(s
, "INTMASK:\t0x%08x\n", mci_readl(host
, INTMASK
));
173 seq_printf(s
, "CLKENA:\t0x%08x\n", mci_readl(host
, CLKENA
));
178 static int dw_mci_regs_open(struct inode
*inode
, struct file
*file
)
180 return single_open(file
, dw_mci_regs_show
, inode
->i_private
);
183 static const struct file_operations dw_mci_regs_fops
= {
184 .owner
= THIS_MODULE
,
185 .open
= dw_mci_regs_open
,
188 .release
= single_release
,
191 static void dw_mci_init_debugfs(struct dw_mci_slot
*slot
)
193 struct mmc_host
*mmc
= slot
->mmc
;
194 struct dw_mci
*host
= slot
->host
;
198 root
= mmc
->debugfs_root
;
202 node
= debugfs_create_file("regs", S_IRUSR
, root
, host
,
207 node
= debugfs_create_file("req", S_IRUSR
, root
, slot
,
212 node
= debugfs_create_u32("state", S_IRUSR
, root
, (u32
*)&host
->state
);
216 node
= debugfs_create_x32("pending_events", S_IRUSR
, root
,
217 (u32
*)&host
->pending_events
);
221 node
= debugfs_create_x32("completed_events", S_IRUSR
, root
,
222 (u32
*)&host
->completed_events
);
229 dev_err(&mmc
->class_dev
, "failed to initialize debugfs for slot\n");
231 #endif /* defined(CONFIG_DEBUG_FS) */
233 static bool dw_mci_ctrl_reset(struct dw_mci
*host
, u32 reset
)
237 ctrl
= mci_readl(host
, CTRL
);
239 mci_writel(host
, CTRL
, ctrl
);
241 /* wait till resets clear */
242 if (readl_poll_timeout_atomic(host
->regs
+ SDMMC_CTRL
, ctrl
,
244 1, 500 * USEC_PER_MSEC
)) {
246 "Timeout resetting block (ctrl reset %#x)\n",
254 static void dw_mci_wait_while_busy(struct dw_mci
*host
, u32 cmd_flags
)
259 * Databook says that before issuing a new data transfer command
260 * we need to check to see if the card is busy. Data transfer commands
261 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
263 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
266 if ((cmd_flags
& SDMMC_CMD_PRV_DAT_WAIT
) &&
267 !(cmd_flags
& SDMMC_CMD_VOLT_SWITCH
)) {
268 if (readl_poll_timeout_atomic(host
->regs
+ SDMMC_STATUS
,
270 !(status
& SDMMC_STATUS_BUSY
),
271 10, 500 * USEC_PER_MSEC
))
272 dev_err(host
->dev
, "Busy; trying anyway\n");
276 static void mci_send_cmd(struct dw_mci_slot
*slot
, u32 cmd
, u32 arg
)
278 struct dw_mci
*host
= slot
->host
;
279 unsigned int cmd_status
= 0;
281 mci_writel(host
, CMDARG
, arg
);
282 wmb(); /* drain writebuffer */
283 dw_mci_wait_while_busy(host
, cmd
);
284 mci_writel(host
, CMD
, SDMMC_CMD_START
| cmd
);
286 if (readl_poll_timeout_atomic(host
->regs
+ SDMMC_CMD
, cmd_status
,
287 !(cmd_status
& SDMMC_CMD_START
),
288 1, 500 * USEC_PER_MSEC
))
289 dev_err(&slot
->mmc
->class_dev
,
290 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
291 cmd
, arg
, cmd_status
);
294 static u32
dw_mci_prepare_command(struct mmc_host
*mmc
, struct mmc_command
*cmd
)
296 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
297 struct dw_mci
*host
= slot
->host
;
300 cmd
->error
= -EINPROGRESS
;
303 if (cmd
->opcode
== MMC_STOP_TRANSMISSION
||
304 cmd
->opcode
== MMC_GO_IDLE_STATE
||
305 cmd
->opcode
== MMC_GO_INACTIVE_STATE
||
306 (cmd
->opcode
== SD_IO_RW_DIRECT
&&
307 ((cmd
->arg
>> 9) & 0x1FFFF) == SDIO_CCCR_ABORT
))
308 cmdr
|= SDMMC_CMD_STOP
;
309 else if (cmd
->opcode
!= MMC_SEND_STATUS
&& cmd
->data
)
310 cmdr
|= SDMMC_CMD_PRV_DAT_WAIT
;
312 if (cmd
->opcode
== SD_SWITCH_VOLTAGE
) {
315 /* Special bit makes CMD11 not die */
316 cmdr
|= SDMMC_CMD_VOLT_SWITCH
;
318 /* Change state to continue to handle CMD11 weirdness */
319 WARN_ON(slot
->host
->state
!= STATE_SENDING_CMD
);
320 slot
->host
->state
= STATE_SENDING_CMD11
;
323 * We need to disable low power mode (automatic clock stop)
324 * while doing voltage switch so we don't confuse the card,
325 * since stopping the clock is a specific part of the UHS
326 * voltage change dance.
328 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
329 * unconditionally turned back on in dw_mci_setup_bus() if it's
330 * ever called with a non-zero clock. That shouldn't happen
331 * until the voltage change is all done.
333 clk_en_a
= mci_readl(host
, CLKENA
);
334 clk_en_a
&= ~(SDMMC_CLKEN_LOW_PWR
<< slot
->id
);
335 mci_writel(host
, CLKENA
, clk_en_a
);
336 mci_send_cmd(slot
, SDMMC_CMD_UPD_CLK
|
337 SDMMC_CMD_PRV_DAT_WAIT
, 0);
340 if (cmd
->flags
& MMC_RSP_PRESENT
) {
341 /* We expect a response, so set this bit */
342 cmdr
|= SDMMC_CMD_RESP_EXP
;
343 if (cmd
->flags
& MMC_RSP_136
)
344 cmdr
|= SDMMC_CMD_RESP_LONG
;
347 if (cmd
->flags
& MMC_RSP_CRC
)
348 cmdr
|= SDMMC_CMD_RESP_CRC
;
351 cmdr
|= SDMMC_CMD_DAT_EXP
;
352 if (cmd
->data
->flags
& MMC_DATA_WRITE
)
353 cmdr
|= SDMMC_CMD_DAT_WR
;
356 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD
, &slot
->flags
))
357 cmdr
|= SDMMC_CMD_USE_HOLD_REG
;
362 static u32
dw_mci_prep_stop_abort(struct dw_mci
*host
, struct mmc_command
*cmd
)
364 struct mmc_command
*stop
;
370 stop
= &host
->stop_abort
;
372 memset(stop
, 0, sizeof(struct mmc_command
));
374 if (cmdr
== MMC_READ_SINGLE_BLOCK
||
375 cmdr
== MMC_READ_MULTIPLE_BLOCK
||
376 cmdr
== MMC_WRITE_BLOCK
||
377 cmdr
== MMC_WRITE_MULTIPLE_BLOCK
||
378 cmdr
== MMC_SEND_TUNING_BLOCK
||
379 cmdr
== MMC_SEND_TUNING_BLOCK_HS200
) {
380 stop
->opcode
= MMC_STOP_TRANSMISSION
;
382 stop
->flags
= MMC_RSP_R1B
| MMC_CMD_AC
;
383 } else if (cmdr
== SD_IO_RW_EXTENDED
) {
384 stop
->opcode
= SD_IO_RW_DIRECT
;
385 stop
->arg
|= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT
<< 9) |
386 ((cmd
->arg
>> 28) & 0x7);
387 stop
->flags
= MMC_RSP_SPI_R5
| MMC_RSP_R5
| MMC_CMD_AC
;
392 cmdr
= stop
->opcode
| SDMMC_CMD_STOP
|
393 SDMMC_CMD_RESP_CRC
| SDMMC_CMD_RESP_EXP
;
395 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD
, &host
->slot
->flags
))
396 cmdr
|= SDMMC_CMD_USE_HOLD_REG
;
401 static inline void dw_mci_set_cto(struct dw_mci
*host
)
403 unsigned int cto_clks
;
404 unsigned int cto_div
;
406 unsigned long irqflags
;
408 cto_clks
= mci_readl(host
, TMOUT
) & 0xff;
409 cto_div
= (mci_readl(host
, CLKDIV
) & 0xff) * 2;
412 cto_ms
= DIV_ROUND_UP(MSEC_PER_SEC
* cto_clks
* cto_div
, host
->bus_hz
);
414 /* add a bit spare time */
418 * The durations we're working with are fairly short so we have to be
419 * extra careful about synchronization here. Specifically in hardware a
420 * command timeout is _at most_ 5.1 ms, so that means we expect an
421 * interrupt (either command done or timeout) to come rather quickly
422 * after the mci_writel. ...but just in case we have a long interrupt
423 * latency let's add a bit of paranoia.
425 * In general we'll assume that at least an interrupt will be asserted
426 * in hardware by the time the cto_timer runs. ...and if it hasn't
427 * been asserted in hardware by that time then we'll assume it'll never
430 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
431 if (!test_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
))
432 mod_timer(&host
->cto_timer
,
433 jiffies
+ msecs_to_jiffies(cto_ms
) + 1);
434 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
437 static void dw_mci_start_command(struct dw_mci
*host
,
438 struct mmc_command
*cmd
, u32 cmd_flags
)
442 "start command: ARGR=0x%08x CMDR=0x%08x\n",
443 cmd
->arg
, cmd_flags
);
445 mci_writel(host
, CMDARG
, cmd
->arg
);
446 wmb(); /* drain writebuffer */
447 dw_mci_wait_while_busy(host
, cmd_flags
);
449 mci_writel(host
, CMD
, cmd_flags
| SDMMC_CMD_START
);
451 /* response expected command only */
452 if (cmd_flags
& SDMMC_CMD_RESP_EXP
)
453 dw_mci_set_cto(host
);
456 static inline void send_stop_abort(struct dw_mci
*host
, struct mmc_data
*data
)
458 struct mmc_command
*stop
= &host
->stop_abort
;
460 dw_mci_start_command(host
, stop
, host
->stop_cmdr
);
463 /* DMA interface functions */
464 static void dw_mci_stop_dma(struct dw_mci
*host
)
466 if (host
->using_dma
) {
467 host
->dma_ops
->stop(host
);
468 host
->dma_ops
->cleanup(host
);
471 /* Data transfer was stopped by the interrupt handler */
472 set_bit(EVENT_XFER_COMPLETE
, &host
->pending_events
);
475 static void dw_mci_dma_cleanup(struct dw_mci
*host
)
477 struct mmc_data
*data
= host
->data
;
479 if (data
&& data
->host_cookie
== COOKIE_MAPPED
) {
480 dma_unmap_sg(host
->dev
,
483 mmc_get_dma_dir(data
));
484 data
->host_cookie
= COOKIE_UNMAPPED
;
488 static void dw_mci_idmac_reset(struct dw_mci
*host
)
490 u32 bmod
= mci_readl(host
, BMOD
);
491 /* Software reset of DMA */
492 bmod
|= SDMMC_IDMAC_SWRESET
;
493 mci_writel(host
, BMOD
, bmod
);
496 static void dw_mci_idmac_stop_dma(struct dw_mci
*host
)
500 /* Disable and reset the IDMAC interface */
501 temp
= mci_readl(host
, CTRL
);
502 temp
&= ~SDMMC_CTRL_USE_IDMAC
;
503 temp
|= SDMMC_CTRL_DMA_RESET
;
504 mci_writel(host
, CTRL
, temp
);
506 /* Stop the IDMAC running */
507 temp
= mci_readl(host
, BMOD
);
508 temp
&= ~(SDMMC_IDMAC_ENABLE
| SDMMC_IDMAC_FB
);
509 temp
|= SDMMC_IDMAC_SWRESET
;
510 mci_writel(host
, BMOD
, temp
);
513 static void dw_mci_dmac_complete_dma(void *arg
)
515 struct dw_mci
*host
= arg
;
516 struct mmc_data
*data
= host
->data
;
518 dev_vdbg(host
->dev
, "DMA complete\n");
520 if ((host
->use_dma
== TRANS_MODE_EDMAC
) &&
521 data
&& (data
->flags
& MMC_DATA_READ
))
522 /* Invalidate cache after read */
523 dma_sync_sg_for_cpu(mmc_dev(host
->slot
->mmc
),
528 host
->dma_ops
->cleanup(host
);
531 * If the card was removed, data will be NULL. No point in trying to
532 * send the stop command or waiting for NBUSY in this case.
535 set_bit(EVENT_XFER_COMPLETE
, &host
->pending_events
);
536 tasklet_schedule(&host
->tasklet
);
540 static int dw_mci_idmac_init(struct dw_mci
*host
)
544 if (host
->dma_64bit_address
== 1) {
545 struct idmac_desc_64addr
*p
;
546 /* Number of descriptors in the ring buffer */
548 DESC_RING_BUF_SZ
/ sizeof(struct idmac_desc_64addr
);
550 /* Forward link the descriptor list */
551 for (i
= 0, p
= host
->sg_cpu
; i
< host
->ring_size
- 1;
553 p
->des6
= (host
->sg_dma
+
554 (sizeof(struct idmac_desc_64addr
) *
555 (i
+ 1))) & 0xffffffff;
557 p
->des7
= (u64
)(host
->sg_dma
+
558 (sizeof(struct idmac_desc_64addr
) *
560 /* Initialize reserved and buffer size fields to "0" */
566 /* Set the last descriptor as the end-of-ring descriptor */
567 p
->des6
= host
->sg_dma
& 0xffffffff;
568 p
->des7
= (u64
)host
->sg_dma
>> 32;
569 p
->des0
= IDMAC_DES0_ER
;
572 struct idmac_desc
*p
;
573 /* Number of descriptors in the ring buffer */
575 DESC_RING_BUF_SZ
/ sizeof(struct idmac_desc
);
577 /* Forward link the descriptor list */
578 for (i
= 0, p
= host
->sg_cpu
;
579 i
< host
->ring_size
- 1;
581 p
->des3
= cpu_to_le32(host
->sg_dma
+
582 (sizeof(struct idmac_desc
) * (i
+ 1)));
586 /* Set the last descriptor as the end-of-ring descriptor */
587 p
->des3
= cpu_to_le32(host
->sg_dma
);
588 p
->des0
= cpu_to_le32(IDMAC_DES0_ER
);
591 dw_mci_idmac_reset(host
);
593 if (host
->dma_64bit_address
== 1) {
594 /* Mask out interrupts - get Tx & Rx complete only */
595 mci_writel(host
, IDSTS64
, IDMAC_INT_CLR
);
596 mci_writel(host
, IDINTEN64
, SDMMC_IDMAC_INT_NI
|
597 SDMMC_IDMAC_INT_RI
| SDMMC_IDMAC_INT_TI
);
599 /* Set the descriptor base address */
600 mci_writel(host
, DBADDRL
, host
->sg_dma
& 0xffffffff);
601 mci_writel(host
, DBADDRU
, (u64
)host
->sg_dma
>> 32);
604 /* Mask out interrupts - get Tx & Rx complete only */
605 mci_writel(host
, IDSTS
, IDMAC_INT_CLR
);
606 mci_writel(host
, IDINTEN
, SDMMC_IDMAC_INT_NI
|
607 SDMMC_IDMAC_INT_RI
| SDMMC_IDMAC_INT_TI
);
609 /* Set the descriptor base address */
610 mci_writel(host
, DBADDR
, host
->sg_dma
);
616 static inline int dw_mci_prepare_desc64(struct dw_mci
*host
,
617 struct mmc_data
*data
,
620 unsigned int desc_len
;
621 struct idmac_desc_64addr
*desc_first
, *desc_last
, *desc
;
625 desc_first
= desc_last
= desc
= host
->sg_cpu
;
627 for (i
= 0; i
< sg_len
; i
++) {
628 unsigned int length
= sg_dma_len(&data
->sg
[i
]);
630 u64 mem_addr
= sg_dma_address(&data
->sg
[i
]);
632 for ( ; length
; desc
++) {
633 desc_len
= (length
<= DW_MCI_DESC_DATA_LENGTH
) ?
634 length
: DW_MCI_DESC_DATA_LENGTH
;
639 * Wait for the former clear OWN bit operation
640 * of IDMAC to make sure that this descriptor
641 * isn't still owned by IDMAC as IDMAC's write
642 * ops and CPU's read ops are asynchronous.
644 if (readl_poll_timeout_atomic(&desc
->des0
, val
,
645 !(val
& IDMAC_DES0_OWN
),
646 10, 100 * USEC_PER_MSEC
))
650 * Set the OWN bit and disable interrupts
651 * for this descriptor
653 desc
->des0
= IDMAC_DES0_OWN
| IDMAC_DES0_DIC
|
657 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc
, desc_len
);
659 /* Physical address to DMA to/from */
660 desc
->des4
= mem_addr
& 0xffffffff;
661 desc
->des5
= mem_addr
>> 32;
663 /* Update physical address for the next desc */
664 mem_addr
+= desc_len
;
666 /* Save pointer to the last descriptor */
671 /* Set first descriptor */
672 desc_first
->des0
|= IDMAC_DES0_FD
;
674 /* Set last descriptor */
675 desc_last
->des0
&= ~(IDMAC_DES0_CH
| IDMAC_DES0_DIC
);
676 desc_last
->des0
|= IDMAC_DES0_LD
;
680 /* restore the descriptor chain as it's polluted */
681 dev_dbg(host
->dev
, "descriptor is still owned by IDMAC.\n");
682 memset(host
->sg_cpu
, 0, DESC_RING_BUF_SZ
);
683 dw_mci_idmac_init(host
);
688 static inline int dw_mci_prepare_desc32(struct dw_mci
*host
,
689 struct mmc_data
*data
,
692 unsigned int desc_len
;
693 struct idmac_desc
*desc_first
, *desc_last
, *desc
;
697 desc_first
= desc_last
= desc
= host
->sg_cpu
;
699 for (i
= 0; i
< sg_len
; i
++) {
700 unsigned int length
= sg_dma_len(&data
->sg
[i
]);
702 u32 mem_addr
= sg_dma_address(&data
->sg
[i
]);
704 for ( ; length
; desc
++) {
705 desc_len
= (length
<= DW_MCI_DESC_DATA_LENGTH
) ?
706 length
: DW_MCI_DESC_DATA_LENGTH
;
711 * Wait for the former clear OWN bit operation
712 * of IDMAC to make sure that this descriptor
713 * isn't still owned by IDMAC as IDMAC's write
714 * ops and CPU's read ops are asynchronous.
716 if (readl_poll_timeout_atomic(&desc
->des0
, val
,
717 IDMAC_OWN_CLR64(val
),
719 100 * USEC_PER_MSEC
))
723 * Set the OWN bit and disable interrupts
724 * for this descriptor
726 desc
->des0
= cpu_to_le32(IDMAC_DES0_OWN
|
731 IDMAC_SET_BUFFER1_SIZE(desc
, desc_len
);
733 /* Physical address to DMA to/from */
734 desc
->des2
= cpu_to_le32(mem_addr
);
736 /* Update physical address for the next desc */
737 mem_addr
+= desc_len
;
739 /* Save pointer to the last descriptor */
744 /* Set first descriptor */
745 desc_first
->des0
|= cpu_to_le32(IDMAC_DES0_FD
);
747 /* Set last descriptor */
748 desc_last
->des0
&= cpu_to_le32(~(IDMAC_DES0_CH
|
750 desc_last
->des0
|= cpu_to_le32(IDMAC_DES0_LD
);
754 /* restore the descriptor chain as it's polluted */
755 dev_dbg(host
->dev
, "descriptor is still owned by IDMAC.\n");
756 memset(host
->sg_cpu
, 0, DESC_RING_BUF_SZ
);
757 dw_mci_idmac_init(host
);
761 static int dw_mci_idmac_start_dma(struct dw_mci
*host
, unsigned int sg_len
)
766 if (host
->dma_64bit_address
== 1)
767 ret
= dw_mci_prepare_desc64(host
, host
->data
, sg_len
);
769 ret
= dw_mci_prepare_desc32(host
, host
->data
, sg_len
);
774 /* drain writebuffer */
777 /* Make sure to reset DMA in case we did PIO before this */
778 dw_mci_ctrl_reset(host
, SDMMC_CTRL_DMA_RESET
);
779 dw_mci_idmac_reset(host
);
781 /* Select IDMAC interface */
782 temp
= mci_readl(host
, CTRL
);
783 temp
|= SDMMC_CTRL_USE_IDMAC
;
784 mci_writel(host
, CTRL
, temp
);
786 /* drain writebuffer */
789 /* Enable the IDMAC */
790 temp
= mci_readl(host
, BMOD
);
791 temp
|= SDMMC_IDMAC_ENABLE
| SDMMC_IDMAC_FB
;
792 mci_writel(host
, BMOD
, temp
);
794 /* Start it running */
795 mci_writel(host
, PLDMND
, 1);
801 static const struct dw_mci_dma_ops dw_mci_idmac_ops
= {
802 .init
= dw_mci_idmac_init
,
803 .start
= dw_mci_idmac_start_dma
,
804 .stop
= dw_mci_idmac_stop_dma
,
805 .complete
= dw_mci_dmac_complete_dma
,
806 .cleanup
= dw_mci_dma_cleanup
,
809 static void dw_mci_edmac_stop_dma(struct dw_mci
*host
)
811 dmaengine_terminate_async(host
->dms
->ch
);
814 static int dw_mci_edmac_start_dma(struct dw_mci
*host
,
817 struct dma_slave_config cfg
;
818 struct dma_async_tx_descriptor
*desc
= NULL
;
819 struct scatterlist
*sgl
= host
->data
->sg
;
820 static const u32 mszs
[] = {1, 4, 8, 16, 32, 64, 128, 256};
821 u32 sg_elems
= host
->data
->sg_len
;
823 u32 fifo_offset
= host
->fifo_reg
- host
->regs
;
826 /* Set external dma config: burst size, burst width */
827 cfg
.dst_addr
= host
->phy_regs
+ fifo_offset
;
828 cfg
.src_addr
= cfg
.dst_addr
;
829 cfg
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
830 cfg
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
832 /* Match burst msize with external dma config */
833 fifoth_val
= mci_readl(host
, FIFOTH
);
834 cfg
.dst_maxburst
= mszs
[(fifoth_val
>> 28) & 0x7];
835 cfg
.src_maxburst
= cfg
.dst_maxburst
;
837 if (host
->data
->flags
& MMC_DATA_WRITE
)
838 cfg
.direction
= DMA_MEM_TO_DEV
;
840 cfg
.direction
= DMA_DEV_TO_MEM
;
842 ret
= dmaengine_slave_config(host
->dms
->ch
, &cfg
);
844 dev_err(host
->dev
, "Failed to config edmac.\n");
848 desc
= dmaengine_prep_slave_sg(host
->dms
->ch
, sgl
,
849 sg_len
, cfg
.direction
,
850 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
852 dev_err(host
->dev
, "Can't prepare slave sg.\n");
856 /* Set dw_mci_dmac_complete_dma as callback */
857 desc
->callback
= dw_mci_dmac_complete_dma
;
858 desc
->callback_param
= (void *)host
;
859 dmaengine_submit(desc
);
861 /* Flush cache before write */
862 if (host
->data
->flags
& MMC_DATA_WRITE
)
863 dma_sync_sg_for_device(mmc_dev(host
->slot
->mmc
), sgl
,
864 sg_elems
, DMA_TO_DEVICE
);
866 dma_async_issue_pending(host
->dms
->ch
);
871 static int dw_mci_edmac_init(struct dw_mci
*host
)
873 /* Request external dma channel */
874 host
->dms
= kzalloc(sizeof(struct dw_mci_dma_slave
), GFP_KERNEL
);
878 host
->dms
->ch
= dma_request_slave_channel(host
->dev
, "rx-tx");
879 if (!host
->dms
->ch
) {
880 dev_err(host
->dev
, "Failed to get external DMA channel.\n");
889 static void dw_mci_edmac_exit(struct dw_mci
*host
)
893 dma_release_channel(host
->dms
->ch
);
894 host
->dms
->ch
= NULL
;
901 static const struct dw_mci_dma_ops dw_mci_edmac_ops
= {
902 .init
= dw_mci_edmac_init
,
903 .exit
= dw_mci_edmac_exit
,
904 .start
= dw_mci_edmac_start_dma
,
905 .stop
= dw_mci_edmac_stop_dma
,
906 .complete
= dw_mci_dmac_complete_dma
,
907 .cleanup
= dw_mci_dma_cleanup
,
910 static int dw_mci_pre_dma_transfer(struct dw_mci
*host
,
911 struct mmc_data
*data
,
914 struct scatterlist
*sg
;
915 unsigned int i
, sg_len
;
917 if (data
->host_cookie
== COOKIE_PRE_MAPPED
)
921 * We don't do DMA on "complex" transfers, i.e. with
922 * non-word-aligned buffers or lengths. Also, we don't bother
923 * with all the DMA setup overhead for short transfers.
925 if (data
->blocks
* data
->blksz
< DW_MCI_DMA_THRESHOLD
)
931 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
932 if (sg
->offset
& 3 || sg
->length
& 3)
936 sg_len
= dma_map_sg(host
->dev
,
939 mmc_get_dma_dir(data
));
943 data
->host_cookie
= cookie
;
948 static void dw_mci_pre_req(struct mmc_host
*mmc
,
949 struct mmc_request
*mrq
)
951 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
952 struct mmc_data
*data
= mrq
->data
;
954 if (!slot
->host
->use_dma
|| !data
)
957 /* This data might be unmapped at this time */
958 data
->host_cookie
= COOKIE_UNMAPPED
;
960 if (dw_mci_pre_dma_transfer(slot
->host
, mrq
->data
,
961 COOKIE_PRE_MAPPED
) < 0)
962 data
->host_cookie
= COOKIE_UNMAPPED
;
965 static void dw_mci_post_req(struct mmc_host
*mmc
,
966 struct mmc_request
*mrq
,
969 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
970 struct mmc_data
*data
= mrq
->data
;
972 if (!slot
->host
->use_dma
|| !data
)
975 if (data
->host_cookie
!= COOKIE_UNMAPPED
)
976 dma_unmap_sg(slot
->host
->dev
,
979 mmc_get_dma_dir(data
));
980 data
->host_cookie
= COOKIE_UNMAPPED
;
983 static int dw_mci_get_cd(struct mmc_host
*mmc
)
986 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
987 struct dw_mci
*host
= slot
->host
;
988 int gpio_cd
= mmc_gpio_get_cd(mmc
);
990 /* Use platform get_cd function, else try onboard card detect */
991 if (((mmc
->caps
& MMC_CAP_NEEDS_POLL
)
992 || !mmc_card_is_removable(mmc
))) {
995 if (!test_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
)) {
996 if (mmc
->caps
& MMC_CAP_NEEDS_POLL
) {
997 dev_info(&mmc
->class_dev
,
998 "card is polling.\n");
1000 dev_info(&mmc
->class_dev
,
1001 "card is non-removable.\n");
1003 set_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
);
1007 } else if (gpio_cd
>= 0)
1010 present
= (mci_readl(slot
->host
, CDETECT
) & (1 << slot
->id
))
1013 spin_lock_bh(&host
->lock
);
1014 if (present
&& !test_and_set_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
))
1015 dev_dbg(&mmc
->class_dev
, "card is present\n");
1016 else if (!present
&&
1017 !test_and_clear_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
))
1018 dev_dbg(&mmc
->class_dev
, "card is not present\n");
1019 spin_unlock_bh(&host
->lock
);
1024 static void dw_mci_adjust_fifoth(struct dw_mci
*host
, struct mmc_data
*data
)
1026 unsigned int blksz
= data
->blksz
;
1027 static const u32 mszs
[] = {1, 4, 8, 16, 32, 64, 128, 256};
1028 u32 fifo_width
= 1 << host
->data_shift
;
1029 u32 blksz_depth
= blksz
/ fifo_width
, fifoth_val
;
1030 u32 msize
= 0, rx_wmark
= 1, tx_wmark
, tx_wmark_invers
;
1031 int idx
= ARRAY_SIZE(mszs
) - 1;
1033 /* pio should ship this scenario */
1037 tx_wmark
= (host
->fifo_depth
) / 2;
1038 tx_wmark_invers
= host
->fifo_depth
- tx_wmark
;
1042 * if blksz is not a multiple of the FIFO width
1044 if (blksz
% fifo_width
)
1048 if (!((blksz_depth
% mszs
[idx
]) ||
1049 (tx_wmark_invers
% mszs
[idx
]))) {
1051 rx_wmark
= mszs
[idx
] - 1;
1054 } while (--idx
> 0);
1056 * If idx is '0', it won't be tried
1057 * Thus, initial values are uesed
1060 fifoth_val
= SDMMC_SET_FIFOTH(msize
, rx_wmark
, tx_wmark
);
1061 mci_writel(host
, FIFOTH
, fifoth_val
);
1064 static void dw_mci_ctrl_thld(struct dw_mci
*host
, struct mmc_data
*data
)
1066 unsigned int blksz
= data
->blksz
;
1067 u32 blksz_depth
, fifo_depth
;
1072 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1073 * in the FIFO region, so we really shouldn't access it).
1075 if (host
->verid
< DW_MMC_240A
||
1076 (host
->verid
< DW_MMC_280A
&& data
->flags
& MMC_DATA_WRITE
))
1080 * Card write Threshold is introduced since 2.80a
1081 * It's used when HS400 mode is enabled.
1083 if (data
->flags
& MMC_DATA_WRITE
&&
1084 !(host
->timing
!= MMC_TIMING_MMC_HS400
))
1087 if (data
->flags
& MMC_DATA_WRITE
)
1088 enable
= SDMMC_CARD_WR_THR_EN
;
1090 enable
= SDMMC_CARD_RD_THR_EN
;
1092 if (host
->timing
!= MMC_TIMING_MMC_HS200
&&
1093 host
->timing
!= MMC_TIMING_UHS_SDR104
)
1096 blksz_depth
= blksz
/ (1 << host
->data_shift
);
1097 fifo_depth
= host
->fifo_depth
;
1099 if (blksz_depth
> fifo_depth
)
1103 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1104 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
1105 * Currently just choose blksz.
1108 mci_writel(host
, CDTHRCTL
, SDMMC_SET_THLD(thld_size
, enable
));
1112 mci_writel(host
, CDTHRCTL
, 0);
1115 static int dw_mci_submit_data_dma(struct dw_mci
*host
, struct mmc_data
*data
)
1117 unsigned long irqflags
;
1121 host
->using_dma
= 0;
1123 /* If we don't have a channel, we can't do DMA */
1127 sg_len
= dw_mci_pre_dma_transfer(host
, data
, COOKIE_MAPPED
);
1129 host
->dma_ops
->stop(host
);
1133 host
->using_dma
= 1;
1135 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1137 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1138 (unsigned long)host
->sg_cpu
,
1139 (unsigned long)host
->sg_dma
,
1143 * Decide the MSIZE and RX/TX Watermark.
1144 * If current block size is same with previous size,
1145 * no need to update fifoth.
1147 if (host
->prev_blksz
!= data
->blksz
)
1148 dw_mci_adjust_fifoth(host
, data
);
1150 /* Enable the DMA interface */
1151 temp
= mci_readl(host
, CTRL
);
1152 temp
|= SDMMC_CTRL_DMA_ENABLE
;
1153 mci_writel(host
, CTRL
, temp
);
1155 /* Disable RX/TX IRQs, let DMA handle it */
1156 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1157 temp
= mci_readl(host
, INTMASK
);
1158 temp
&= ~(SDMMC_INT_RXDR
| SDMMC_INT_TXDR
);
1159 mci_writel(host
, INTMASK
, temp
);
1160 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1162 if (host
->dma_ops
->start(host
, sg_len
)) {
1163 host
->dma_ops
->stop(host
);
1164 /* We can't do DMA, try PIO for this one */
1166 "%s: fall back to PIO mode for current transfer\n",
1174 static void dw_mci_submit_data(struct dw_mci
*host
, struct mmc_data
*data
)
1176 unsigned long irqflags
;
1177 int flags
= SG_MITER_ATOMIC
;
1180 data
->error
= -EINPROGRESS
;
1182 WARN_ON(host
->data
);
1186 if (data
->flags
& MMC_DATA_READ
)
1187 host
->dir_status
= DW_MCI_RECV_STATUS
;
1189 host
->dir_status
= DW_MCI_SEND_STATUS
;
1191 dw_mci_ctrl_thld(host
, data
);
1193 if (dw_mci_submit_data_dma(host
, data
)) {
1194 if (host
->data
->flags
& MMC_DATA_READ
)
1195 flags
|= SG_MITER_TO_SG
;
1197 flags
|= SG_MITER_FROM_SG
;
1199 sg_miter_start(&host
->sg_miter
, data
->sg
, data
->sg_len
, flags
);
1200 host
->sg
= data
->sg
;
1201 host
->part_buf_start
= 0;
1202 host
->part_buf_count
= 0;
1204 mci_writel(host
, RINTSTS
, SDMMC_INT_TXDR
| SDMMC_INT_RXDR
);
1206 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1207 temp
= mci_readl(host
, INTMASK
);
1208 temp
|= SDMMC_INT_TXDR
| SDMMC_INT_RXDR
;
1209 mci_writel(host
, INTMASK
, temp
);
1210 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1212 temp
= mci_readl(host
, CTRL
);
1213 temp
&= ~SDMMC_CTRL_DMA_ENABLE
;
1214 mci_writel(host
, CTRL
, temp
);
1217 * Use the initial fifoth_val for PIO mode. If wm_algined
1218 * is set, we set watermark same as data size.
1219 * If next issued data may be transfered by DMA mode,
1220 * prev_blksz should be invalidated.
1222 if (host
->wm_aligned
)
1223 dw_mci_adjust_fifoth(host
, data
);
1225 mci_writel(host
, FIFOTH
, host
->fifoth_val
);
1226 host
->prev_blksz
= 0;
1229 * Keep the current block size.
1230 * It will be used to decide whether to update
1231 * fifoth register next time.
1233 host
->prev_blksz
= data
->blksz
;
1237 static void dw_mci_setup_bus(struct dw_mci_slot
*slot
, bool force_clkinit
)
1239 struct dw_mci
*host
= slot
->host
;
1240 unsigned int clock
= slot
->clock
;
1243 u32 sdmmc_cmd_bits
= SDMMC_CMD_UPD_CLK
| SDMMC_CMD_PRV_DAT_WAIT
;
1245 /* We must continue to set bit 28 in CMD until the change is complete */
1246 if (host
->state
== STATE_WAITING_CMD11_DONE
)
1247 sdmmc_cmd_bits
|= SDMMC_CMD_VOLT_SWITCH
;
1250 mci_writel(host
, CLKENA
, 0);
1251 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1252 } else if (clock
!= host
->current_speed
|| force_clkinit
) {
1253 div
= host
->bus_hz
/ clock
;
1254 if (host
->bus_hz
% clock
&& host
->bus_hz
> clock
)
1256 * move the + 1 after the divide to prevent
1257 * over-clocking the card.
1261 div
= (host
->bus_hz
!= clock
) ? DIV_ROUND_UP(div
, 2) : 0;
1263 if ((clock
!= slot
->__clk_old
&&
1264 !test_bit(DW_MMC_CARD_NEEDS_POLL
, &slot
->flags
)) ||
1266 /* Silent the verbose log if calling from PM context */
1268 dev_info(&slot
->mmc
->class_dev
,
1269 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1270 slot
->id
, host
->bus_hz
, clock
,
1271 div
? ((host
->bus_hz
/ div
) >> 1) :
1275 * If card is polling, display the message only
1276 * one time at boot time.
1278 if (slot
->mmc
->caps
& MMC_CAP_NEEDS_POLL
&&
1279 slot
->mmc
->f_min
== clock
)
1280 set_bit(DW_MMC_CARD_NEEDS_POLL
, &slot
->flags
);
1284 mci_writel(host
, CLKENA
, 0);
1285 mci_writel(host
, CLKSRC
, 0);
1288 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1290 /* set clock to desired speed */
1291 mci_writel(host
, CLKDIV
, div
);
1294 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1296 /* enable clock; only low power if no SDIO */
1297 clk_en_a
= SDMMC_CLKEN_ENABLE
<< slot
->id
;
1298 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
))
1299 clk_en_a
|= SDMMC_CLKEN_LOW_PWR
<< slot
->id
;
1300 mci_writel(host
, CLKENA
, clk_en_a
);
1303 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1305 /* keep the last clock value that was requested from core */
1306 slot
->__clk_old
= clock
;
1309 host
->current_speed
= clock
;
1311 /* Set the current slot bus width */
1312 mci_writel(host
, CTYPE
, (slot
->ctype
<< slot
->id
));
1315 static void __dw_mci_start_request(struct dw_mci
*host
,
1316 struct dw_mci_slot
*slot
,
1317 struct mmc_command
*cmd
)
1319 struct mmc_request
*mrq
;
1320 struct mmc_data
*data
;
1327 host
->pending_events
= 0;
1328 host
->completed_events
= 0;
1329 host
->cmd_status
= 0;
1330 host
->data_status
= 0;
1331 host
->dir_status
= 0;
1335 mci_writel(host
, TMOUT
, 0xFFFFFFFF);
1336 mci_writel(host
, BYTCNT
, data
->blksz
*data
->blocks
);
1337 mci_writel(host
, BLKSIZ
, data
->blksz
);
1340 cmdflags
= dw_mci_prepare_command(slot
->mmc
, cmd
);
1342 /* this is the first command, send the initialization clock */
1343 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT
, &slot
->flags
))
1344 cmdflags
|= SDMMC_CMD_INIT
;
1347 dw_mci_submit_data(host
, data
);
1348 wmb(); /* drain writebuffer */
1351 dw_mci_start_command(host
, cmd
, cmdflags
);
1353 if (cmd
->opcode
== SD_SWITCH_VOLTAGE
) {
1354 unsigned long irqflags
;
1357 * Databook says to fail after 2ms w/ no response, but evidence
1358 * shows that sometimes the cmd11 interrupt takes over 130ms.
1359 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1360 * is just about to roll over.
1362 * We do this whole thing under spinlock and only if the
1363 * command hasn't already completed (indicating the the irq
1364 * already ran so we don't want the timeout).
1366 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1367 if (!test_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
))
1368 mod_timer(&host
->cmd11_timer
,
1369 jiffies
+ msecs_to_jiffies(500) + 1);
1370 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1373 host
->stop_cmdr
= dw_mci_prep_stop_abort(host
, cmd
);
1376 static void dw_mci_start_request(struct dw_mci
*host
,
1377 struct dw_mci_slot
*slot
)
1379 struct mmc_request
*mrq
= slot
->mrq
;
1380 struct mmc_command
*cmd
;
1382 cmd
= mrq
->sbc
? mrq
->sbc
: mrq
->cmd
;
1383 __dw_mci_start_request(host
, slot
, cmd
);
1386 /* must be called with host->lock held */
1387 static void dw_mci_queue_request(struct dw_mci
*host
, struct dw_mci_slot
*slot
,
1388 struct mmc_request
*mrq
)
1390 dev_vdbg(&slot
->mmc
->class_dev
, "queue request: state=%d\n",
1395 if (host
->state
== STATE_WAITING_CMD11_DONE
) {
1396 dev_warn(&slot
->mmc
->class_dev
,
1397 "Voltage change didn't complete\n");
1399 * this case isn't expected to happen, so we can
1400 * either crash here or just try to continue on
1401 * in the closest possible state
1403 host
->state
= STATE_IDLE
;
1406 if (host
->state
== STATE_IDLE
) {
1407 host
->state
= STATE_SENDING_CMD
;
1408 dw_mci_start_request(host
, slot
);
1410 list_add_tail(&slot
->queue_node
, &host
->queue
);
1414 static void dw_mci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
1416 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1417 struct dw_mci
*host
= slot
->host
;
1422 * The check for card presence and queueing of the request must be
1423 * atomic, otherwise the card could be removed in between and the
1424 * request wouldn't fail until another card was inserted.
1427 if (!dw_mci_get_cd(mmc
)) {
1428 mrq
->cmd
->error
= -ENOMEDIUM
;
1429 mmc_request_done(mmc
, mrq
);
1433 spin_lock_bh(&host
->lock
);
1435 dw_mci_queue_request(host
, slot
, mrq
);
1437 spin_unlock_bh(&host
->lock
);
1440 static void dw_mci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1442 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1443 const struct dw_mci_drv_data
*drv_data
= slot
->host
->drv_data
;
1447 switch (ios
->bus_width
) {
1448 case MMC_BUS_WIDTH_4
:
1449 slot
->ctype
= SDMMC_CTYPE_4BIT
;
1451 case MMC_BUS_WIDTH_8
:
1452 slot
->ctype
= SDMMC_CTYPE_8BIT
;
1455 /* set default 1 bit mode */
1456 slot
->ctype
= SDMMC_CTYPE_1BIT
;
1459 regs
= mci_readl(slot
->host
, UHS_REG
);
1462 if (ios
->timing
== MMC_TIMING_MMC_DDR52
||
1463 ios
->timing
== MMC_TIMING_UHS_DDR50
||
1464 ios
->timing
== MMC_TIMING_MMC_HS400
)
1465 regs
|= ((0x1 << slot
->id
) << 16);
1467 regs
&= ~((0x1 << slot
->id
) << 16);
1469 mci_writel(slot
->host
, UHS_REG
, regs
);
1470 slot
->host
->timing
= ios
->timing
;
1473 * Use mirror of ios->clock to prevent race with mmc
1474 * core ios update when finding the minimum.
1476 slot
->clock
= ios
->clock
;
1478 if (drv_data
&& drv_data
->set_ios
)
1479 drv_data
->set_ios(slot
->host
, ios
);
1481 switch (ios
->power_mode
) {
1483 if (!IS_ERR(mmc
->supply
.vmmc
)) {
1484 ret
= mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
,
1487 dev_err(slot
->host
->dev
,
1488 "failed to enable vmmc regulator\n");
1489 /*return, if failed turn on vmmc*/
1493 set_bit(DW_MMC_CARD_NEED_INIT
, &slot
->flags
);
1494 regs
= mci_readl(slot
->host
, PWREN
);
1495 regs
|= (1 << slot
->id
);
1496 mci_writel(slot
->host
, PWREN
, regs
);
1499 if (!slot
->host
->vqmmc_enabled
) {
1500 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1501 ret
= regulator_enable(mmc
->supply
.vqmmc
);
1503 dev_err(slot
->host
->dev
,
1504 "failed to enable vqmmc\n");
1506 slot
->host
->vqmmc_enabled
= true;
1509 /* Keep track so we don't reset again */
1510 slot
->host
->vqmmc_enabled
= true;
1513 /* Reset our state machine after powering on */
1514 dw_mci_ctrl_reset(slot
->host
,
1515 SDMMC_CTRL_ALL_RESET_FLAGS
);
1518 /* Adjust clock / bus width after power is up */
1519 dw_mci_setup_bus(slot
, false);
1523 /* Turn clock off before power goes down */
1524 dw_mci_setup_bus(slot
, false);
1526 if (!IS_ERR(mmc
->supply
.vmmc
))
1527 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, 0);
1529 if (!IS_ERR(mmc
->supply
.vqmmc
) && slot
->host
->vqmmc_enabled
)
1530 regulator_disable(mmc
->supply
.vqmmc
);
1531 slot
->host
->vqmmc_enabled
= false;
1533 regs
= mci_readl(slot
->host
, PWREN
);
1534 regs
&= ~(1 << slot
->id
);
1535 mci_writel(slot
->host
, PWREN
, regs
);
1541 if (slot
->host
->state
== STATE_WAITING_CMD11_DONE
&& ios
->clock
!= 0)
1542 slot
->host
->state
= STATE_IDLE
;
1545 static int dw_mci_card_busy(struct mmc_host
*mmc
)
1547 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1551 * Check the busy bit which is low when DAT[3:0]
1552 * (the data lines) are 0000
1554 status
= mci_readl(slot
->host
, STATUS
);
1556 return !!(status
& SDMMC_STATUS_BUSY
);
1559 static int dw_mci_switch_voltage(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1561 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1562 struct dw_mci
*host
= slot
->host
;
1563 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1565 u32 v18
= SDMMC_UHS_18V
<< slot
->id
;
1568 if (drv_data
&& drv_data
->switch_voltage
)
1569 return drv_data
->switch_voltage(mmc
, ios
);
1572 * Program the voltage. Note that some instances of dw_mmc may use
1573 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1574 * does no harm but you need to set the regulator directly. Try both.
1576 uhs
= mci_readl(host
, UHS_REG
);
1577 if (ios
->signal_voltage
== MMC_SIGNAL_VOLTAGE_330
)
1582 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1583 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
1586 dev_dbg(&mmc
->class_dev
,
1587 "Regulator set error %d - %s V\n",
1588 ret
, uhs
& v18
? "1.8" : "3.3");
1592 mci_writel(host
, UHS_REG
, uhs
);
1597 static int dw_mci_get_ro(struct mmc_host
*mmc
)
1600 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1601 int gpio_ro
= mmc_gpio_get_ro(mmc
);
1603 /* Use platform get_ro function, else try on board write protect */
1605 read_only
= gpio_ro
;
1608 mci_readl(slot
->host
, WRTPRT
) & (1 << slot
->id
) ? 1 : 0;
1610 dev_dbg(&mmc
->class_dev
, "card is %s\n",
1611 read_only
? "read-only" : "read-write");
1616 static void dw_mci_hw_reset(struct mmc_host
*mmc
)
1618 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1619 struct dw_mci
*host
= slot
->host
;
1622 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1623 dw_mci_idmac_reset(host
);
1625 if (!dw_mci_ctrl_reset(host
, SDMMC_CTRL_DMA_RESET
|
1626 SDMMC_CTRL_FIFO_RESET
))
1630 * According to eMMC spec, card reset procedure:
1631 * tRstW >= 1us: RST_n pulse width
1632 * tRSCA >= 200us: RST_n to Command time
1633 * tRSTH >= 1us: RST_n high period
1635 reset
= mci_readl(host
, RST_N
);
1636 reset
&= ~(SDMMC_RST_HWACTIVE
<< slot
->id
);
1637 mci_writel(host
, RST_N
, reset
);
1639 reset
|= SDMMC_RST_HWACTIVE
<< slot
->id
;
1640 mci_writel(host
, RST_N
, reset
);
1641 usleep_range(200, 300);
1644 static void dw_mci_init_card(struct mmc_host
*mmc
, struct mmc_card
*card
)
1646 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1647 struct dw_mci
*host
= slot
->host
;
1650 * Low power mode will stop the card clock when idle. According to the
1651 * description of the CLKENA register we should disable low power mode
1652 * for SDIO cards if we need SDIO interrupts to work.
1654 if (mmc
->caps
& MMC_CAP_SDIO_IRQ
) {
1655 const u32 clken_low_pwr
= SDMMC_CLKEN_LOW_PWR
<< slot
->id
;
1659 clk_en_a_old
= mci_readl(host
, CLKENA
);
1661 if (card
->type
== MMC_TYPE_SDIO
||
1662 card
->type
== MMC_TYPE_SD_COMBO
) {
1663 set_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
);
1664 clk_en_a
= clk_en_a_old
& ~clken_low_pwr
;
1666 clear_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
);
1667 clk_en_a
= clk_en_a_old
| clken_low_pwr
;
1670 if (clk_en_a
!= clk_en_a_old
) {
1671 mci_writel(host
, CLKENA
, clk_en_a
);
1672 mci_send_cmd(slot
, SDMMC_CMD_UPD_CLK
|
1673 SDMMC_CMD_PRV_DAT_WAIT
, 0);
1678 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot
*slot
, int enb
)
1680 struct dw_mci
*host
= slot
->host
;
1681 unsigned long irqflags
;
1684 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1686 /* Enable/disable Slot Specific SDIO interrupt */
1687 int_mask
= mci_readl(host
, INTMASK
);
1689 int_mask
|= SDMMC_INT_SDIO(slot
->sdio_id
);
1691 int_mask
&= ~SDMMC_INT_SDIO(slot
->sdio_id
);
1692 mci_writel(host
, INTMASK
, int_mask
);
1694 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1697 static void dw_mci_enable_sdio_irq(struct mmc_host
*mmc
, int enb
)
1699 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1700 struct dw_mci
*host
= slot
->host
;
1702 __dw_mci_enable_sdio_irq(slot
, enb
);
1704 /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1706 pm_runtime_get_noresume(host
->dev
);
1708 pm_runtime_put_noidle(host
->dev
);
1711 static void dw_mci_ack_sdio_irq(struct mmc_host
*mmc
)
1713 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1715 __dw_mci_enable_sdio_irq(slot
, 1);
1718 static int dw_mci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
)
1720 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1721 struct dw_mci
*host
= slot
->host
;
1722 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1725 if (drv_data
&& drv_data
->execute_tuning
)
1726 err
= drv_data
->execute_tuning(slot
, opcode
);
1730 static int dw_mci_prepare_hs400_tuning(struct mmc_host
*mmc
,
1731 struct mmc_ios
*ios
)
1733 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1734 struct dw_mci
*host
= slot
->host
;
1735 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1737 if (drv_data
&& drv_data
->prepare_hs400_tuning
)
1738 return drv_data
->prepare_hs400_tuning(host
, ios
);
1743 static bool dw_mci_reset(struct dw_mci
*host
)
1745 u32 flags
= SDMMC_CTRL_RESET
| SDMMC_CTRL_FIFO_RESET
;
1750 * Resetting generates a block interrupt, hence setting
1751 * the scatter-gather pointer to NULL.
1754 sg_miter_stop(&host
->sg_miter
);
1759 flags
|= SDMMC_CTRL_DMA_RESET
;
1761 if (dw_mci_ctrl_reset(host
, flags
)) {
1763 * In all cases we clear the RAWINTS
1764 * register to clear any interrupts.
1766 mci_writel(host
, RINTSTS
, 0xFFFFFFFF);
1768 if (!host
->use_dma
) {
1773 /* Wait for dma_req to be cleared */
1774 if (readl_poll_timeout_atomic(host
->regs
+ SDMMC_STATUS
,
1776 !(status
& SDMMC_STATUS_DMA_REQ
),
1777 1, 500 * USEC_PER_MSEC
)) {
1779 "%s: Timeout waiting for dma_req to be cleared\n",
1784 /* when using DMA next we reset the fifo again */
1785 if (!dw_mci_ctrl_reset(host
, SDMMC_CTRL_FIFO_RESET
))
1788 /* if the controller reset bit did clear, then set clock regs */
1789 if (!(mci_readl(host
, CTRL
) & SDMMC_CTRL_RESET
)) {
1791 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1797 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1798 /* It is also recommended that we reset and reprogram idmac */
1799 dw_mci_idmac_reset(host
);
1804 /* After a CTRL reset we need to have CIU set clock registers */
1805 mci_send_cmd(host
->slot
, SDMMC_CMD_UPD_CLK
, 0);
1810 static const struct mmc_host_ops dw_mci_ops
= {
1811 .request
= dw_mci_request
,
1812 .pre_req
= dw_mci_pre_req
,
1813 .post_req
= dw_mci_post_req
,
1814 .set_ios
= dw_mci_set_ios
,
1815 .get_ro
= dw_mci_get_ro
,
1816 .get_cd
= dw_mci_get_cd
,
1817 .hw_reset
= dw_mci_hw_reset
,
1818 .enable_sdio_irq
= dw_mci_enable_sdio_irq
,
1819 .ack_sdio_irq
= dw_mci_ack_sdio_irq
,
1820 .execute_tuning
= dw_mci_execute_tuning
,
1821 .card_busy
= dw_mci_card_busy
,
1822 .start_signal_voltage_switch
= dw_mci_switch_voltage
,
1823 .init_card
= dw_mci_init_card
,
1824 .prepare_hs400_tuning
= dw_mci_prepare_hs400_tuning
,
1827 static void dw_mci_request_end(struct dw_mci
*host
, struct mmc_request
*mrq
)
1828 __releases(&host
->lock
)
1829 __acquires(&host
->lock
)
1831 struct dw_mci_slot
*slot
;
1832 struct mmc_host
*prev_mmc
= host
->slot
->mmc
;
1834 WARN_ON(host
->cmd
|| host
->data
);
1836 host
->slot
->mrq
= NULL
;
1838 if (!list_empty(&host
->queue
)) {
1839 slot
= list_entry(host
->queue
.next
,
1840 struct dw_mci_slot
, queue_node
);
1841 list_del(&slot
->queue_node
);
1842 dev_vdbg(host
->dev
, "list not empty: %s is next\n",
1843 mmc_hostname(slot
->mmc
));
1844 host
->state
= STATE_SENDING_CMD
;
1845 dw_mci_start_request(host
, slot
);
1847 dev_vdbg(host
->dev
, "list empty\n");
1849 if (host
->state
== STATE_SENDING_CMD11
)
1850 host
->state
= STATE_WAITING_CMD11_DONE
;
1852 host
->state
= STATE_IDLE
;
1855 spin_unlock(&host
->lock
);
1856 mmc_request_done(prev_mmc
, mrq
);
1857 spin_lock(&host
->lock
);
1860 static int dw_mci_command_complete(struct dw_mci
*host
, struct mmc_command
*cmd
)
1862 u32 status
= host
->cmd_status
;
1864 host
->cmd_status
= 0;
1866 /* Read the response from the card (up to 16 bytes) */
1867 if (cmd
->flags
& MMC_RSP_PRESENT
) {
1868 if (cmd
->flags
& MMC_RSP_136
) {
1869 cmd
->resp
[3] = mci_readl(host
, RESP0
);
1870 cmd
->resp
[2] = mci_readl(host
, RESP1
);
1871 cmd
->resp
[1] = mci_readl(host
, RESP2
);
1872 cmd
->resp
[0] = mci_readl(host
, RESP3
);
1874 cmd
->resp
[0] = mci_readl(host
, RESP0
);
1881 if (status
& SDMMC_INT_RTO
)
1882 cmd
->error
= -ETIMEDOUT
;
1883 else if ((cmd
->flags
& MMC_RSP_CRC
) && (status
& SDMMC_INT_RCRC
))
1884 cmd
->error
= -EILSEQ
;
1885 else if (status
& SDMMC_INT_RESP_ERR
)
1893 static int dw_mci_data_complete(struct dw_mci
*host
, struct mmc_data
*data
)
1895 u32 status
= host
->data_status
;
1897 if (status
& DW_MCI_DATA_ERROR_FLAGS
) {
1898 if (status
& SDMMC_INT_DRTO
) {
1899 data
->error
= -ETIMEDOUT
;
1900 } else if (status
& SDMMC_INT_DCRC
) {
1901 data
->error
= -EILSEQ
;
1902 } else if (status
& SDMMC_INT_EBE
) {
1903 if (host
->dir_status
==
1904 DW_MCI_SEND_STATUS
) {
1906 * No data CRC status was returned.
1907 * The number of bytes transferred
1908 * will be exaggerated in PIO mode.
1910 data
->bytes_xfered
= 0;
1911 data
->error
= -ETIMEDOUT
;
1912 } else if (host
->dir_status
==
1913 DW_MCI_RECV_STATUS
) {
1914 data
->error
= -EILSEQ
;
1917 /* SDMMC_INT_SBE is included */
1918 data
->error
= -EILSEQ
;
1921 dev_dbg(host
->dev
, "data error, status 0x%08x\n", status
);
1924 * After an error, there may be data lingering
1929 data
->bytes_xfered
= data
->blocks
* data
->blksz
;
1936 static void dw_mci_set_drto(struct dw_mci
*host
)
1938 unsigned int drto_clks
;
1939 unsigned int drto_div
;
1940 unsigned int drto_ms
;
1941 unsigned long irqflags
;
1943 drto_clks
= mci_readl(host
, TMOUT
) >> 8;
1944 drto_div
= (mci_readl(host
, CLKDIV
) & 0xff) * 2;
1947 drto_ms
= DIV_ROUND_UP(MSEC_PER_SEC
* drto_clks
* drto_div
,
1950 /* add a bit spare time */
1953 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1954 if (!test_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
))
1955 mod_timer(&host
->dto_timer
,
1956 jiffies
+ msecs_to_jiffies(drto_ms
));
1957 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1960 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci
*host
)
1962 if (!test_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
))
1966 * Really be certain that the timer has stopped. This is a bit of
1967 * paranoia and could only really happen if we had really bad
1968 * interrupt latency and the interrupt routine and timeout were
1969 * running concurrently so that the del_timer() in the interrupt
1970 * handler couldn't run.
1972 WARN_ON(del_timer_sync(&host
->cto_timer
));
1973 clear_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
);
1978 static bool dw_mci_clear_pending_data_complete(struct dw_mci
*host
)
1980 if (!test_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
))
1983 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
1984 WARN_ON(del_timer_sync(&host
->dto_timer
));
1985 clear_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
);
1990 static void dw_mci_tasklet_func(unsigned long priv
)
1992 struct dw_mci
*host
= (struct dw_mci
*)priv
;
1993 struct mmc_data
*data
;
1994 struct mmc_command
*cmd
;
1995 struct mmc_request
*mrq
;
1996 enum dw_mci_state state
;
1997 enum dw_mci_state prev_state
;
2000 spin_lock(&host
->lock
);
2002 state
= host
->state
;
2011 case STATE_WAITING_CMD11_DONE
:
2014 case STATE_SENDING_CMD11
:
2015 case STATE_SENDING_CMD
:
2016 if (!dw_mci_clear_pending_cmd_complete(host
))
2021 set_bit(EVENT_CMD_COMPLETE
, &host
->completed_events
);
2022 err
= dw_mci_command_complete(host
, cmd
);
2023 if (cmd
== mrq
->sbc
&& !err
) {
2024 prev_state
= state
= STATE_SENDING_CMD
;
2025 __dw_mci_start_request(host
, host
->slot
,
2030 if (cmd
->data
&& err
) {
2032 * During UHS tuning sequence, sending the stop
2033 * command after the response CRC error would
2034 * throw the system into a confused state
2035 * causing all future tuning phases to report
2038 * In such case controller will move into a data
2039 * transfer state after a response error or
2040 * response CRC error. Let's let that finish
2041 * before trying to send a stop, so we'll go to
2042 * STATE_SENDING_DATA.
2044 * Although letting the data transfer take place
2045 * will waste a bit of time (we already know
2046 * the command was bad), it can't cause any
2047 * errors since it's possible it would have
2048 * taken place anyway if this tasklet got
2049 * delayed. Allowing the transfer to take place
2050 * avoids races and keeps things simple.
2052 if ((err
!= -ETIMEDOUT
) &&
2053 (cmd
->opcode
== MMC_SEND_TUNING_BLOCK
)) {
2054 state
= STATE_SENDING_DATA
;
2058 dw_mci_stop_dma(host
);
2059 send_stop_abort(host
, data
);
2060 state
= STATE_SENDING_STOP
;
2064 if (!cmd
->data
|| err
) {
2065 dw_mci_request_end(host
, mrq
);
2069 prev_state
= state
= STATE_SENDING_DATA
;
2072 case STATE_SENDING_DATA
:
2074 * We could get a data error and never a transfer
2075 * complete so we'd better check for it here.
2077 * Note that we don't really care if we also got a
2078 * transfer complete; stopping the DMA and sending an
2081 if (test_and_clear_bit(EVENT_DATA_ERROR
,
2082 &host
->pending_events
)) {
2083 dw_mci_stop_dma(host
);
2084 if (!(host
->data_status
& (SDMMC_INT_DRTO
|
2086 send_stop_abort(host
, data
);
2087 state
= STATE_DATA_ERROR
;
2091 if (!test_and_clear_bit(EVENT_XFER_COMPLETE
,
2092 &host
->pending_events
)) {
2094 * If all data-related interrupts don't come
2095 * within the given time in reading data state.
2097 if (host
->dir_status
== DW_MCI_RECV_STATUS
)
2098 dw_mci_set_drto(host
);
2102 set_bit(EVENT_XFER_COMPLETE
, &host
->completed_events
);
2105 * Handle an EVENT_DATA_ERROR that might have shown up
2106 * before the transfer completed. This might not have
2107 * been caught by the check above because the interrupt
2108 * could have gone off between the previous check and
2109 * the check for transfer complete.
2111 * Technically this ought not be needed assuming we
2112 * get a DATA_COMPLETE eventually (we'll notice the
2113 * error and end the request), but it shouldn't hurt.
2115 * This has the advantage of sending the stop command.
2117 if (test_and_clear_bit(EVENT_DATA_ERROR
,
2118 &host
->pending_events
)) {
2119 dw_mci_stop_dma(host
);
2120 if (!(host
->data_status
& (SDMMC_INT_DRTO
|
2122 send_stop_abort(host
, data
);
2123 state
= STATE_DATA_ERROR
;
2126 prev_state
= state
= STATE_DATA_BUSY
;
2130 case STATE_DATA_BUSY
:
2131 if (!dw_mci_clear_pending_data_complete(host
)) {
2133 * If data error interrupt comes but data over
2134 * interrupt doesn't come within the given time.
2135 * in reading data state.
2137 if (host
->dir_status
== DW_MCI_RECV_STATUS
)
2138 dw_mci_set_drto(host
);
2143 set_bit(EVENT_DATA_COMPLETE
, &host
->completed_events
);
2144 err
= dw_mci_data_complete(host
, data
);
2147 if (!data
->stop
|| mrq
->sbc
) {
2148 if (mrq
->sbc
&& data
->stop
)
2149 data
->stop
->error
= 0;
2150 dw_mci_request_end(host
, mrq
);
2154 /* stop command for open-ended transfer*/
2156 send_stop_abort(host
, data
);
2159 * If we don't have a command complete now we'll
2160 * never get one since we just reset everything;
2161 * better end the request.
2163 * If we do have a command complete we'll fall
2164 * through to the SENDING_STOP command and
2165 * everything will be peachy keen.
2167 if (!test_bit(EVENT_CMD_COMPLETE
,
2168 &host
->pending_events
)) {
2170 dw_mci_request_end(host
, mrq
);
2176 * If err has non-zero,
2177 * stop-abort command has been already issued.
2179 prev_state
= state
= STATE_SENDING_STOP
;
2183 case STATE_SENDING_STOP
:
2184 if (!dw_mci_clear_pending_cmd_complete(host
))
2187 /* CMD error in data command */
2188 if (mrq
->cmd
->error
&& mrq
->data
)
2194 if (!mrq
->sbc
&& mrq
->stop
)
2195 dw_mci_command_complete(host
, mrq
->stop
);
2197 host
->cmd_status
= 0;
2199 dw_mci_request_end(host
, mrq
);
2202 case STATE_DATA_ERROR
:
2203 if (!test_and_clear_bit(EVENT_XFER_COMPLETE
,
2204 &host
->pending_events
))
2207 state
= STATE_DATA_BUSY
;
2210 } while (state
!= prev_state
);
2212 host
->state
= state
;
2214 spin_unlock(&host
->lock
);
2218 /* push final bytes to part_buf, only use during push */
2219 static void dw_mci_set_part_bytes(struct dw_mci
*host
, void *buf
, int cnt
)
2221 memcpy((void *)&host
->part_buf
, buf
, cnt
);
2222 host
->part_buf_count
= cnt
;
2225 /* append bytes to part_buf, only use during push */
2226 static int dw_mci_push_part_bytes(struct dw_mci
*host
, void *buf
, int cnt
)
2228 cnt
= min(cnt
, (1 << host
->data_shift
) - host
->part_buf_count
);
2229 memcpy((void *)&host
->part_buf
+ host
->part_buf_count
, buf
, cnt
);
2230 host
->part_buf_count
+= cnt
;
2234 /* pull first bytes from part_buf, only use during pull */
2235 static int dw_mci_pull_part_bytes(struct dw_mci
*host
, void *buf
, int cnt
)
2237 cnt
= min_t(int, cnt
, host
->part_buf_count
);
2239 memcpy(buf
, (void *)&host
->part_buf
+ host
->part_buf_start
,
2241 host
->part_buf_count
-= cnt
;
2242 host
->part_buf_start
+= cnt
;
2247 /* pull final bytes from the part_buf, assuming it's just been filled */
2248 static void dw_mci_pull_final_bytes(struct dw_mci
*host
, void *buf
, int cnt
)
2250 memcpy(buf
, &host
->part_buf
, cnt
);
2251 host
->part_buf_start
= cnt
;
2252 host
->part_buf_count
= (1 << host
->data_shift
) - cnt
;
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
    struct mmc_data *data = host->data;
    int init_cnt = cnt;

    /* try and push anything in the part_buf */
    if (unlikely(host->part_buf_count)) {
        int len = dw_mci_push_part_bytes(host, buf, cnt);

        buf += len;
        cnt -= len;
        if (host->part_buf_count == 2) {
            mci_fifo_writew(host->fifo_reg, host->part_buf16);
            host->part_buf_count = 0;
        }
    }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x1)) {
        while (cnt >= 2) {
            u16 aligned_buf[64];
            int len = min(cnt & -2, (int)sizeof(aligned_buf));
            int items = len >> 1;
            int i;

            /* memcpy from input buffer into aligned buffer */
            memcpy(aligned_buf, buf, len);
            buf += len;
            cnt -= len;

            /* push data from aligned buffer into fifo */
            for (i = 0; i < items; ++i)
                mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
        }
    } else
#endif
    {
        u16 *pdata = buf;

        for (; cnt >= 2; cnt -= 2)
            mci_fifo_writew(host->fifo_reg, *pdata++);
        buf = pdata;
    }
    /* put anything remaining in the part_buf */
    if (cnt) {
        dw_mci_set_part_bytes(host, buf, cnt);
        /* Push data if we have reached the expected data length */
        if ((data->bytes_xfered + init_cnt) ==
            (data->blksz * data->blocks))
            mci_fifo_writew(host->fifo_reg, host->part_buf16);
    }
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x1)) {
        while (cnt >= 2) {
            /* pull data from fifo into aligned buffer */
            u16 aligned_buf[64];
            int len = min(cnt & -2, (int)sizeof(aligned_buf));
            int items = len >> 1;
            int i;

            for (i = 0; i < items; ++i)
                aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
            /* memcpy from aligned buffer into output buffer */
            memcpy(buf, aligned_buf, len);
            buf += len;
            cnt -= len;
        }
    } else
#endif
    {
        u16 *pdata = buf;

        for (; cnt >= 2; cnt -= 2)
            *pdata++ = mci_fifo_readw(host->fifo_reg);
        buf = pdata;
    }
    if (cnt) {
        host->part_buf16 = mci_fifo_readw(host->fifo_reg);
        dw_mci_pull_final_bytes(host, buf, cnt);
    }
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
    struct mmc_data *data = host->data;
    int init_cnt = cnt;

    /* try and push anything in the part_buf */
    if (unlikely(host->part_buf_count)) {
        int len = dw_mci_push_part_bytes(host, buf, cnt);

        buf += len;
        cnt -= len;
        if (host->part_buf_count == 4) {
            mci_fifo_writel(host->fifo_reg, host->part_buf32);
            host->part_buf_count = 0;
        }
    }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x3)) {
        while (cnt >= 4) {
            u32 aligned_buf[32];
            int len = min(cnt & -4, (int)sizeof(aligned_buf));
            int items = len >> 2;
            int i;

            /* memcpy from input buffer into aligned buffer */
            memcpy(aligned_buf, buf, len);
            buf += len;
            cnt -= len;

            /* push data from aligned buffer into fifo */
            for (i = 0; i < items; ++i)
                mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
        }
    } else
#endif
    {
        u32 *pdata = buf;

        for (; cnt >= 4; cnt -= 4)
            mci_fifo_writel(host->fifo_reg, *pdata++);
        buf = pdata;
    }
    /* put anything remaining in the part_buf */
    if (cnt) {
        dw_mci_set_part_bytes(host, buf, cnt);
        /* Push data if we have reached the expected data length */
        if ((data->bytes_xfered + init_cnt) ==
            (data->blksz * data->blocks))
            mci_fifo_writel(host->fifo_reg, host->part_buf32);
    }
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x3)) {
        while (cnt >= 4) {
            /* pull data from fifo into aligned buffer */
            u32 aligned_buf[32];
            int len = min(cnt & -4, (int)sizeof(aligned_buf));
            int items = len >> 2;
            int i;

            for (i = 0; i < items; ++i)
                aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
            /* memcpy from aligned buffer into output buffer */
            memcpy(buf, aligned_buf, len);
            buf += len;
            cnt -= len;
        }
    } else
#endif
    {
        u32 *pdata = buf;

        for (; cnt >= 4; cnt -= 4)
            *pdata++ = mci_fifo_readl(host->fifo_reg);
        buf = pdata;
    }
    if (cnt) {
        host->part_buf32 = mci_fifo_readl(host->fifo_reg);
        dw_mci_pull_final_bytes(host, buf, cnt);
    }
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
    struct mmc_data *data = host->data;
    int init_cnt = cnt;

    /* try and push anything in the part_buf */
    if (unlikely(host->part_buf_count)) {
        int len = dw_mci_push_part_bytes(host, buf, cnt);

        buf += len;
        cnt -= len;
        if (host->part_buf_count == 8) {
            mci_fifo_writeq(host->fifo_reg, host->part_buf);
            host->part_buf_count = 0;
        }
    }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x7)) {
        while (cnt >= 8) {
            u64 aligned_buf[16];
            int len = min(cnt & -8, (int)sizeof(aligned_buf));
            int items = len >> 3;
            int i;

            /* memcpy from input buffer into aligned buffer */
            memcpy(aligned_buf, buf, len);
            buf += len;
            cnt -= len;

            /* push data from aligned buffer into fifo */
            for (i = 0; i < items; ++i)
                mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
        }
    } else
#endif
    {
        u64 *pdata = buf;

        for (; cnt >= 8; cnt -= 8)
            mci_fifo_writeq(host->fifo_reg, *pdata++);
        buf = pdata;
    }
    /* put anything remaining in the part_buf */
    if (cnt) {
        dw_mci_set_part_bytes(host, buf, cnt);
        /* Push data if we have reached the expected data length */
        if ((data->bytes_xfered + init_cnt) ==
            (data->blksz * data->blocks))
            mci_fifo_writeq(host->fifo_reg, host->part_buf);
    }
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    if (unlikely((unsigned long)buf & 0x7)) {
        while (cnt >= 8) {
            /* pull data from fifo into aligned buffer */
            u64 aligned_buf[16];
            int len = min(cnt & -8, (int)sizeof(aligned_buf));
            int items = len >> 3;
            int i;

            for (i = 0; i < items; ++i)
                aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

            /* memcpy from aligned buffer into output buffer */
            memcpy(buf, aligned_buf, len);
            buf += len;
            cnt -= len;
        }
    } else
#endif
    {
        u64 *pdata = buf;

        for (; cnt >= 8; cnt -= 8)
            *pdata++ = mci_fifo_readq(host->fifo_reg);
        buf = pdata;
    }
    if (cnt) {
        host->part_buf = mci_fifo_readq(host->fifo_reg);
        dw_mci_pull_final_bytes(host, buf, cnt);
    }
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
    int len;

    /* get remaining partial bytes */
    len = dw_mci_pull_part_bytes(host, buf, cnt);
    if (unlikely(len == cnt))
        return;
    buf += len;
    cnt -= len;

    /* get the rest of the data */
    host->pull_data(host, buf, cnt);
}
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
    struct sg_mapping_iter *sg_miter = &host->sg_miter;
    void *buf;
    unsigned int offset;
    struct mmc_data *data = host->data;
    int shift = host->data_shift;
    u32 status;
    unsigned int len;
    unsigned int remain, fcnt;

    do {
        if (!sg_miter_next(sg_miter))
            goto done;

        host->sg = sg_miter->piter.sg;
        buf = sg_miter->addr;
        remain = sg_miter->length;
        offset = 0;

        do {
            fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
                    << shift) + host->part_buf_count;
            len = min(remain, fcnt);
            if (!len)
                break;
            dw_mci_pull_data(host, (void *)(buf + offset), len);
            data->bytes_xfered += len;
            offset += len;
            remain -= len;
        } while (remain);

        sg_miter->consumed = offset;
        status = mci_readl(host, MINTSTS);
        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
    /* if the RXDR is ready read again */
    } while ((status & SDMMC_INT_RXDR) ||
             (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

    if (!sg_miter_next(sg_miter))
        goto done;
    sg_miter->consumed = 0;
    sg_miter_stop(sg_miter);
    return;

done:
    sg_miter_stop(sg_miter);
    host->sg = NULL;
    smp_wmb(); /* drain writebuffer */
    set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
    struct sg_mapping_iter *sg_miter = &host->sg_miter;
    void *buf;
    unsigned int offset;
    struct mmc_data *data = host->data;
    int shift = host->data_shift;
    u32 status;
    unsigned int len;
    unsigned int fifo_depth = host->fifo_depth;
    unsigned int remain, fcnt;

    do {
        if (!sg_miter_next(sg_miter))
            goto done;

        host->sg = sg_miter->piter.sg;
        buf = sg_miter->addr;
        remain = sg_miter->length;
        offset = 0;

        do {
            fcnt = ((fifo_depth -
                     SDMMC_GET_FCNT(mci_readl(host, STATUS)))
                    << shift) - host->part_buf_count;
            len = min(remain, fcnt);
            if (!len)
                break;
            host->push_data(host, (void *)(buf + offset), len);
            data->bytes_xfered += len;
            offset += len;
            remain -= len;
        } while (remain);

        sg_miter->consumed = offset;
        status = mci_readl(host, MINTSTS);
        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
    } while (status & SDMMC_INT_TXDR); /* if TXDR write again */

    if (!sg_miter_next(sg_miter))
        goto done;
    sg_miter->consumed = 0;
    sg_miter_stop(sg_miter);
    return;

done:
    sg_miter_stop(sg_miter);
    host->sg = NULL;
    smp_wmb(); /* drain writebuffer */
    set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
    del_timer(&host->cto_timer);

    if (!host->cmd_status)
        host->cmd_status = status;

    smp_wmb(); /* drain writebuffer */

    set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
    tasklet_schedule(&host->tasklet);
}
static void dw_mci_handle_cd(struct dw_mci *host)
{
    struct dw_mci_slot *slot = host->slot;

    if (slot->mmc->ops->card_event)
        slot->mmc->ops->card_event(slot->mmc);
    mmc_detect_change(slot->mmc,
                      msecs_to_jiffies(host->pdata->detect_delay_ms));
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
    struct dw_mci *host = dev_id;
    u32 pending;
    struct dw_mci_slot *slot = host->slot;
    unsigned long irqflags;

    pending = mci_readl(host, MINTSTS); /* read-only mask reg */

    if (pending) {
        /* Check volt switch first, since it can look like an error */
        if ((host->state == STATE_SENDING_CMD11) &&
            (pending & SDMMC_INT_VOLT_SWITCH)) {
            mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
            pending &= ~SDMMC_INT_VOLT_SWITCH;

            /*
             * Hold the lock; we know cmd11_timer can't be kicked
             * off after the lock is released, so safe to delete.
             */
            spin_lock_irqsave(&host->irq_lock, irqflags);
            dw_mci_cmd_interrupt(host, pending);
            spin_unlock_irqrestore(&host->irq_lock, irqflags);

            del_timer(&host->cmd11_timer);
        }

        if (pending & DW_MCI_CMD_ERROR_FLAGS) {
            spin_lock_irqsave(&host->irq_lock, irqflags);

            del_timer(&host->cto_timer);
            mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
            host->cmd_status = pending;
            smp_wmb(); /* drain writebuffer */
            set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

            spin_unlock_irqrestore(&host->irq_lock, irqflags);
        }

        if (pending & DW_MCI_DATA_ERROR_FLAGS) {
            /* if there is an error report DATA_ERROR */
            mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
            host->data_status = pending;
            smp_wmb(); /* drain writebuffer */
            set_bit(EVENT_DATA_ERROR, &host->pending_events);
            tasklet_schedule(&host->tasklet);
        }

        if (pending & SDMMC_INT_DATA_OVER) {
            spin_lock_irqsave(&host->irq_lock, irqflags);

            del_timer(&host->dto_timer);

            mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
            if (!host->data_status)
                host->data_status = pending;
            smp_wmb(); /* drain writebuffer */
            if (host->dir_status == DW_MCI_RECV_STATUS) {
                if (host->sg != NULL)
                    dw_mci_read_data_pio(host, true);
            }
            set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
            tasklet_schedule(&host->tasklet);

            spin_unlock_irqrestore(&host->irq_lock, irqflags);
        }

        if (pending & SDMMC_INT_RXDR) {
            mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
            if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
                dw_mci_read_data_pio(host, false);
        }

        if (pending & SDMMC_INT_TXDR) {
            mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
            if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
                dw_mci_write_data_pio(host);
        }

        if (pending & SDMMC_INT_CMD_DONE) {
            spin_lock_irqsave(&host->irq_lock, irqflags);

            mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
            dw_mci_cmd_interrupt(host, pending);

            spin_unlock_irqrestore(&host->irq_lock, irqflags);
        }

        if (pending & SDMMC_INT_CD) {
            mci_writel(host, RINTSTS, SDMMC_INT_CD);
            dw_mci_handle_cd(host);
        }

        if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
            mci_writel(host, RINTSTS,
                       SDMMC_INT_SDIO(slot->sdio_id));
            __dw_mci_enable_sdio_irq(slot, 0);
            sdio_signal_irq(slot->mmc);
        }
    }

    if (host->use_dma != TRANS_MODE_IDMAC)
        return IRQ_HANDLED;

    /* Handle IDMA interrupts */
    if (host->dma_64bit_address == 1) {
        pending = mci_readl(host, IDSTS64);
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
            mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
                                      SDMMC_IDMAC_INT_RI);
            mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
            if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                host->dma_ops->complete((void *)host);
        }
    } else {
        pending = mci_readl(host, IDSTS);
        if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
            mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
                                    SDMMC_IDMAC_INT_RI);
            mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
            if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
                host->dma_ops->complete((void *)host);
        }
    }

    return IRQ_HANDLED;
}
static int dw_mci_init_slot(struct dw_mci *host)
{
    struct mmc_host *mmc;
    struct dw_mci_slot *slot;
    const struct dw_mci_drv_data *drv_data = host->drv_data;
    int ctrl_id, ret;
    u32 freq[2];

    mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
    if (!mmc)
        return -ENOMEM;

    slot = mmc_priv(mmc);
    slot->id = 0;
    slot->sdio_id = host->sdio_id0 + slot->id;
    slot->mmc = mmc;
    slot->host = host;
    host->slot = slot;

    mmc->ops = &dw_mci_ops;
    if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
                                       freq, 2)) {
        mmc->f_min = DW_MCI_FREQ_MIN;
        mmc->f_max = DW_MCI_FREQ_MAX;
    } else {
        dev_info(host->dev,
                 "'clock-freq-min-max' property was deprecated.\n");
        mmc->f_min = freq[0];
        mmc->f_max = freq[1];
    }

    /* if there are external regulators, get them */
    ret = mmc_regulator_get_supply(mmc);
    if (ret)
        goto err_host_allocated;

    if (!mmc->ocr_avail)
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

    if (host->pdata->caps)
        mmc->caps = host->pdata->caps;

    /*
     * Support MMC_CAP_ERASE by default.
     * It needs to use trim/discard/erase commands.
     */
    mmc->caps |= MMC_CAP_ERASE;

    if (host->pdata->pm_caps)
        mmc->pm_caps = host->pdata->pm_caps;

    if (host->dev->of_node) {
        ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
        if (ctrl_id < 0)
            ctrl_id = 0;
    } else {
        ctrl_id = to_platform_device(host->dev)->id;
    }

    if (drv_data && drv_data->caps)
        mmc->caps |= drv_data->caps[ctrl_id];

    if (host->pdata->caps2)
        mmc->caps2 = host->pdata->caps2;

    ret = mmc_of_parse(mmc);
    if (ret)
        goto err_host_allocated;

    /* Process SDIO IRQs through the sdio_irq_work. */
    if (mmc->caps & MMC_CAP_SDIO_IRQ)
        mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

    /* Useful defaults if platform data is unset. */
    if (host->use_dma == TRANS_MODE_IDMAC) {
        mmc->max_segs = host->ring_size;
        mmc->max_blk_size = 65535;
        mmc->max_seg_size = 0x1000;
        mmc->max_req_size = mmc->max_seg_size * host->ring_size;
        mmc->max_blk_count = mmc->max_req_size / 512;
    } else if (host->use_dma == TRANS_MODE_EDMAC) {
        mmc->max_segs = 64;
        mmc->max_blk_size = 65535;
        mmc->max_blk_count = 65535;
        mmc->max_req_size =
                mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;
    } else {
        /* TRANS_MODE_PIO */
        mmc->max_segs = 64;
        mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
        mmc->max_blk_count = 512;
        mmc->max_req_size = mmc->max_blk_size *
                            mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;
    }

    ret = mmc_add_host(mmc);
    if (ret)
        goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
    dw_mci_init_debugfs(slot);
#endif

    return 0;

err_host_allocated:
    mmc_free_host(mmc);
    return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
    /* Debugfs stuff is cleaned up by mmc core */
    mmc_remove_host(slot->mmc);
    slot->host->slot = NULL;
    mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
    int addr_config;
    struct device *dev = host->dev;

    /*
     * Check transfer mode from HCON[17:16]
     * Clear the ambiguous description of dw_mmc databook:
     * 2b'00: No DMA Interface -> Actually means using Internal DMA block
     * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
     * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
     * 2b'11: Non DW DMA Interface -> pio only
     * Compared to DesignWare DMA Interface, Generic DMA Interface has a
     * simpler request/acknowledge handshake mechanism and both of them
     * are regarded as external dma master for dw_mmc.
     */
    host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
    if (host->use_dma == DMA_INTERFACE_IDMA) {
        host->use_dma = TRANS_MODE_IDMAC;
    } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
               host->use_dma == DMA_INTERFACE_GDMA) {
        host->use_dma = TRANS_MODE_EDMAC;
    } else {
        goto no_dma;
    }

    /* Determine which DMA interface to use */
    if (host->use_dma == TRANS_MODE_IDMAC) {
        /*
         * Check ADDR_CONFIG bit in HCON to find
         * IDMAC address bus width
         */
        addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

        if (addr_config == 1) {
            /* host supports IDMAC in 64-bit address mode */
            host->dma_64bit_address = 1;
            dev_info(host->dev,
                     "IDMAC supports 64-bit address mode.\n");
            if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
                dma_set_coherent_mask(host->dev,
                                      DMA_BIT_MASK(64));
        } else {
            /* host supports IDMAC in 32-bit address mode */
            host->dma_64bit_address = 0;
            dev_info(host->dev,
                     "IDMAC supports 32-bit address mode.\n");
        }

        /* Alloc memory for sg translation */
        host->sg_cpu = dmam_alloc_coherent(host->dev,
                                           DESC_RING_BUF_SZ,
                                           &host->sg_dma, GFP_KERNEL);
        if (!host->sg_cpu) {
            dev_err(host->dev,
                    "%s: could not alloc DMA memory\n",
                    __func__);
            goto no_dma;
        }

        host->dma_ops = &dw_mci_idmac_ops;
        dev_info(host->dev, "Using internal DMA controller.\n");
    } else {
        /* TRANS_MODE_EDMAC: check dma bindings again */
        if ((device_property_read_string_array(dev, "dma-names",
                                               NULL, 0) < 0) ||
            !device_property_present(dev, "dmas"))
            goto no_dma;

        host->dma_ops = &dw_mci_edmac_ops;
        dev_info(host->dev, "Using external DMA controller.\n");
    }

    if (host->dma_ops->init && host->dma_ops->start &&
        host->dma_ops->stop && host->dma_ops->cleanup) {
        if (host->dma_ops->init(host)) {
            dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
                    __func__);
            goto no_dma;
        }
    } else {
        dev_err(host->dev, "DMA initialization not found.\n");
        goto no_dma;
    }

    return;

no_dma:
    dev_info(host->dev, "Using PIO mode.\n");
    host->use_dma = TRANS_MODE_PIO;
}
static void dw_mci_cmd11_timer(struct timer_list *t)
{
    struct dw_mci *host = from_timer(host, t, cmd11_timer);

    if (host->state != STATE_SENDING_CMD11) {
        dev_warn(host->dev, "Unexpected CMD11 timeout\n");
        return;
    }

    host->cmd_status = SDMMC_INT_RTO;
    set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
    tasklet_schedule(&host->tasklet);
}
static void dw_mci_cto_timer(struct timer_list *t)
{
    struct dw_mci *host = from_timer(host, t, cto_timer);
    unsigned long irqflags;
    u32 pending;

    spin_lock_irqsave(&host->irq_lock, irqflags);

    /*
     * If somehow we have very bad interrupt latency it's remotely possible
     * that the timer could fire while the interrupt is still pending or
     * while the interrupt is midway through running.  Let's be paranoid
     * and detect those two cases.  Note that this paranoia is somewhat
     * justified because in this function we don't actually cancel the
     * pending command in the controller--we just assume it will never come.
     */
    pending = mci_readl(host, MINTSTS); /* read-only mask reg */
    if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
        /* The interrupt should fire; no need to act but we can warn */
        dev_warn(host->dev, "Unexpected interrupt latency\n");
        goto exit;
    }

    if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
        /* Presumably interrupt handler couldn't delete the timer */
        dev_warn(host->dev, "CTO timeout when already completed\n");
        goto exit;
    }

    /*
     * Continued paranoia to make sure we're in the state we expect.
     * This paranoia isn't really justified but it seems good to be safe.
     */
    switch (host->state) {
    case STATE_SENDING_CMD11:
    case STATE_SENDING_CMD:
    case STATE_SENDING_STOP:
        /*
         * If CMD_DONE interrupt does NOT come in sending command
         * state, we should notify the driver to terminate current
         * transfer and report a command timeout to the core.
         */
        host->cmd_status = SDMMC_INT_RTO;
        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
        break;
    default:
        dev_warn(host->dev, "Unexpected command timeout, state %d\n",
                 host->state);
        break;
    }

exit:
    spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static void dw_mci_dto_timer(struct timer_list *t)
{
    struct dw_mci *host = from_timer(host, t, dto_timer);
    unsigned long irqflags;
    u32 pending;

    spin_lock_irqsave(&host->irq_lock, irqflags);

    /*
     * The DTO timer is much longer than the CTO timer, so it's even less
     * likely that we'll hit these cases, but it pays to be paranoid.
     */
    pending = mci_readl(host, MINTSTS); /* read-only mask reg */
    if (pending & SDMMC_INT_DATA_OVER) {
        /* The interrupt should fire; no need to act but we can warn */
        dev_warn(host->dev, "Unexpected data interrupt latency\n");
        goto exit;
    }

    if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
        /* Presumably interrupt handler couldn't delete the timer */
        dev_warn(host->dev, "DTO timeout when already completed\n");
        goto exit;
    }

    /*
     * Continued paranoia to make sure we're in the state we expect.
     * This paranoia isn't really justified but it seems good to be safe.
     */
    switch (host->state) {
    case STATE_SENDING_DATA:
    case STATE_DATA_BUSY:
        /*
         * If DTO interrupt does NOT come in sending data state,
         * we should notify the driver to terminate current transfer
         * and report a data timeout to the core.
         */
        host->data_status = SDMMC_INT_DRTO;
        set_bit(EVENT_DATA_ERROR, &host->pending_events);
        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
        break;
    default:
        dev_warn(host->dev, "Unexpected data timeout, state %d\n",
                 host->state);
        break;
    }

exit:
    spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
    struct dw_mci_board *pdata;
    struct device *dev = host->dev;
    const struct dw_mci_drv_data *drv_data = host->drv_data;
    int ret;
    u32 clock_frequency;

    pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
    if (!pdata)
        return ERR_PTR(-ENOMEM);

    /* find reset controller when exist */
    pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
    if (IS_ERR(pdata->rstc)) {
        if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
            return ERR_PTR(-EPROBE_DEFER);
    }

    /* find out number of slots supported */
    if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
        dev_info(dev, "'num-slots' was deprecated.\n");

    if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
        dev_info(dev,
                 "fifo-depth property not found, using value of FIFOTH register as default\n");

    device_property_read_u32(dev, "card-detect-delay",
                             &pdata->detect_delay_ms);

    device_property_read_u32(dev, "data-addr", &host->data_addr_override);

    if (device_property_present(dev, "fifo-watermark-aligned"))
        host->wm_aligned = true;

    if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
        pdata->bus_hz = clock_frequency;

    if (drv_data && drv_data->parse_dt) {
        ret = drv_data->parse_dt(host);
        if (ret)
            return ERR_PTR(ret);
    }

    return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
    return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
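/*
 * The properties parsed above follow the standard synopsys,dw-mshc
 * device-tree binding.  A hedged example node (addresses, clock phandles
 * and values are placeholders, not taken from any particular board):
 *
 *    mmc@12200000 {
 *        compatible = "snps,dw-mshc";
 *        reg = <0x12200000 0x1000>;
 *        interrupts = <0 75 0>;
 *        clocks = <&clock 300>, <&clock 149>;
 *        clock-names = "biu", "ciu";
 *        fifo-depth = <0x80>;
 *        card-detect-delay = <200>;
 *        bus-width = <4>;
 *    };
 */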
static void dw_mci_enable_cd(struct dw_mci *host)
{
    unsigned long irqflags;
    u32 temp;

    /*
     * The controller's card-detect interrupt is only needed when the
     * slot neither polls (broken-cd) nor has a usable CD GPIO.
     */
    if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
        return;

    if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
        spin_lock_irqsave(&host->irq_lock, irqflags);
        temp = mci_readl(host, INTMASK);
        temp |= SDMMC_INT_CD;
        mci_writel(host, INTMASK, temp);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);
    }
}
int dw_mci_probe(struct dw_mci *host)
{
    const struct dw_mci_drv_data *drv_data = host->drv_data;
    int width, i, ret = 0;
    u32 fifo_size;

    if (!host->pdata) {
        host->pdata = dw_mci_parse_dt(host);
        if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
            return -EPROBE_DEFER;
        } else if (IS_ERR(host->pdata)) {
            dev_err(host->dev, "platform data not available\n");
            return -EINVAL;
        }
    }

    host->biu_clk = devm_clk_get(host->dev, "biu");
    if (IS_ERR(host->biu_clk)) {
        dev_dbg(host->dev, "biu clock not available\n");
    } else {
        ret = clk_prepare_enable(host->biu_clk);
        if (ret) {
            dev_err(host->dev, "failed to enable biu clock\n");
            return ret;
        }
    }

    host->ciu_clk = devm_clk_get(host->dev, "ciu");
    if (IS_ERR(host->ciu_clk)) {
        dev_dbg(host->dev, "ciu clock not available\n");
        host->bus_hz = host->pdata->bus_hz;
    } else {
        ret = clk_prepare_enable(host->ciu_clk);
        if (ret) {
            dev_err(host->dev, "failed to enable ciu clock\n");
            goto err_clk_biu;
        }

        if (host->pdata->bus_hz) {
            ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
            if (ret)
                dev_warn(host->dev,
                         "Unable to set bus rate to %uHz\n",
                         host->pdata->bus_hz);
        }
        host->bus_hz = clk_get_rate(host->ciu_clk);
    }

    if (!host->bus_hz) {
        dev_err(host->dev,
                "Platform data must supply bus speed\n");
        ret = -ENODEV;
        goto err_clk_ciu;
    }

    if (!IS_ERR(host->pdata->rstc)) {
        reset_control_assert(host->pdata->rstc);
        usleep_range(10, 50);
        reset_control_deassert(host->pdata->rstc);
    }

    if (drv_data && drv_data->init) {
        ret = drv_data->init(host);
        if (ret) {
            dev_err(host->dev,
                    "implementation specific init failed\n");
            goto err_clk_ciu;
        }
    }

    timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
    timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
    timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

    spin_lock_init(&host->lock);
    spin_lock_init(&host->irq_lock);
    INIT_LIST_HEAD(&host->queue);

    /*
     * Get the host data width - this assumes that HCON has been set with
     * the correct values.
     */
    i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
    if (!i) {
        host->push_data = dw_mci_push_data16;
        host->pull_data = dw_mci_pull_data16;
        width = 16;
        host->data_shift = 1;
    } else if (i == 2) {
        host->push_data = dw_mci_push_data64;
        host->pull_data = dw_mci_pull_data64;
        width = 64;
        host->data_shift = 3;
    } else {
        /* Check for a reserved value, and warn if it is */
        WARN((i != 1),
             "HCON reports a reserved host data width!\n"
             "Defaulting to 32-bit access.\n");
        host->push_data = dw_mci_push_data32;
        host->pull_data = dw_mci_pull_data32;
        width = 32;
        host->data_shift = 2;
    }

    /* Reset all blocks */
    if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
        ret = -ENODEV;
        goto err_clk_ciu;
    }

    host->dma_ops = host->pdata->dma_ops;
    dw_mci_init_dma(host);

    /* Clear the interrupts for the host controller */
    mci_writel(host, RINTSTS, 0xFFFFFFFF);
    mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

    /* Put in max timeout */
    mci_writel(host, TMOUT, 0xFFFFFFFF);

    /*
     * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
     *                          Tx Mark = fifo_size / 2 DMA Size = 8
     */
    if (!host->pdata->fifo_depth) {
        /*
         * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
         * have been overwritten by the bootloader, just like we're
         * about to do, so if you know the value for your hardware, you
         * should put it in the platform data.
         */
        fifo_size = mci_readl(host, FIFOTH);
        fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
    } else {
        fifo_size = host->pdata->fifo_depth;
    }
    host->fifo_depth = fifo_size;
    host->fifoth_val =
        SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
    mci_writel(host, FIFOTH, host->fifoth_val);

    /* disable clock to CIU */
    mci_writel(host, CLKENA, 0);
    mci_writel(host, CLKSRC, 0);

    /*
     * In 2.40a spec, Data offset is changed.
     * Need to check the version-id and set data-offset for DATA register.
     */
    host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
    dev_info(host->dev, "Version ID is %04x\n", host->verid);

    if (host->data_addr_override)
        host->fifo_reg = host->regs + host->data_addr_override;
    else if (host->verid < DW_MMC_240A)
        host->fifo_reg = host->regs + DATA_OFFSET;
    else
        host->fifo_reg = host->regs + DATA_240A_OFFSET;

    tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
    ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
                           host->irq_flags, "dw-mci", host);
    if (ret)
        goto err_dmaunmap;

    /*
     * Enable interrupts for command done, data over, data empty,
     * receive ready and error such as transmit, receive timeout, crc error
     */
    mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
               SDMMC_INT_TXDR | SDMMC_INT_RXDR |
               DW_MCI_ERROR_FLAGS);
    /* Enable mci interrupt */
    mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

    dev_info(host->dev,
             "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
             host->irq, width, fifo_size);

    /* We need at least one slot to succeed */
    ret = dw_mci_init_slot(host);
    if (ret) {
        dev_dbg(host->dev, "slot %d init failed\n", i);
        goto err_dmaunmap;
    }

    /* Now that slots are all setup, we can enable card detect */
    dw_mci_enable_cd(host);

    return 0;

err_dmaunmap:
    if (host->use_dma && host->dma_ops->exit)
        host->dma_ops->exit(host);

    if (!IS_ERR(host->pdata->rstc))
        reset_control_assert(host->pdata->rstc);

err_clk_ciu:
    clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
    clk_disable_unprepare(host->biu_clk);

    return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
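/*
 * Worked example for the FIFOTH programming in dw_mci_probe() (illustrative
 * numbers): with a 64-word FIFO, SDMMC_SET_FIFOTH(0x2, 31, 32) selects a DMA
 * multiple-transaction size of 8 transfers, an RX watermark of
 * fifo_size / 2 - 1 and a TX watermark of fifo_size / 2, so interrupts and
 * DMA requests fire once the FIFO is roughly half full on reads or half
 * empty on writes.
 */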
void dw_mci_remove(struct dw_mci *host)
{
    dev_dbg(host->dev, "remove slot\n");
    dw_mci_cleanup_slot(host->slot);

    mci_writel(host, RINTSTS, 0xFFFFFFFF);
    mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

    /* disable clock to CIU */
    mci_writel(host, CLKENA, 0);
    mci_writel(host, CLKSRC, 0);

    if (host->use_dma && host->dma_ops->exit)
        host->dma_ops->exit(host);

    if (!IS_ERR(host->pdata->rstc))
        reset_control_assert(host->pdata->rstc);

    clk_disable_unprepare(host->ciu_clk);
    clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
    struct dw_mci *host = dev_get_drvdata(dev);

    if (host->use_dma && host->dma_ops->exit)
        host->dma_ops->exit(host);

    clk_disable_unprepare(host->ciu_clk);

    if (host->slot &&
        (mmc_can_gpio_cd(host->slot->mmc) ||
         !mmc_card_is_removable(host->slot->mmc)))
        clk_disable_unprepare(host->biu_clk);

    return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
int dw_mci_runtime_resume(struct device *dev)
{
    int ret = 0;
    struct dw_mci *host = dev_get_drvdata(dev);

    if (host->slot &&
        (mmc_can_gpio_cd(host->slot->mmc) ||
         !mmc_card_is_removable(host->slot->mmc))) {
        ret = clk_prepare_enable(host->biu_clk);
        if (ret)
            return ret;
    }

    ret = clk_prepare_enable(host->ciu_clk);
    if (ret)
        goto err;

    if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
        clk_disable_unprepare(host->ciu_clk);
        ret = -ENODEV;
        goto err;
    }

    if (host->use_dma && host->dma_ops->init)
        host->dma_ops->init(host);

    /*
     * Restore the initial value at FIFOTH register
     * And Invalidate the prev_blksz with zero
     */
    mci_writel(host, FIFOTH, host->fifoth_val);
    host->prev_blksz = 0;

    /* Put in max timeout */
    mci_writel(host, TMOUT, 0xFFFFFFFF);

    mci_writel(host, RINTSTS, 0xFFFFFFFF);
    mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
               SDMMC_INT_TXDR | SDMMC_INT_RXDR |
               DW_MCI_ERROR_FLAGS);
    mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

    if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
        dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

    /* Force setup bus to guarantee available clock output */
    dw_mci_setup_bus(host->slot, true);

    /* Now that slots are all setup, we can enable card detect */
    dw_mci_enable_cd(host);

    return 0;

err:
    if (host->slot &&
        (mmc_can_gpio_cd(host->slot->mmc) ||
         !mmc_card_is_removable(host->slot->mmc)))
        clk_disable_unprepare(host->biu_clk);

    return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
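/*
 * A platform glue layer typically wires the runtime PM helpers above into
 * its dev_pm_ops.  Hedged sketch of that usage (hypothetical glue driver,
 * not part of this file):
 *
 *    static const struct dev_pm_ops dw_mci_foo_pmops = {
 *        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *                                pm_runtime_force_resume)
 *        SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
 *                           dw_mci_runtime_resume, NULL)
 *    };
 */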
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
    pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
    return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");