// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE
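
/*
 * Sizing example (illustrative; assumes a 4 KiB PAGE_SIZE, not stated in
 * the original source): the descriptor ring below holds
 *   DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr) = 4096 / 32 = 128
 * 64-bit descriptors, or 4096 / 16 = 256 of the 32-bit descriptors. The
 * actual count is computed at runtime in dw_mci_idmac_init().
 */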
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
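
/*
 * Illustrative note (added for clarity, not in the original source): the
 * BUFFER1_SIZE macros above keep only bits [12:0] of the size field, so a
 * single descriptor buffer can describe at most 8191 bytes; capping each
 * chained-mode buffer at 0x1000 (4096) bytes keeps every chunk well within
 * that field.
 */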
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}
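
/*
 * Usage sketch (illustrative): mci_send_cmd() is only used for
 * controller-internal commands that never reach the card, most notably
 * the clock-update request issued while CLKDIV/CLKENA are reprogrammed:
 *
 *	mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 *
 * It busy-waits for SDMMC_CMD_START to clear instead of relying on the
 * command-done interrupt used by dw_mci_start_command().
 */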
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
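
/*
 * Example (illustrative): for CMD18 (MMC_READ_MULTIPLE_BLOCK) with an R1
 * response, the value returned above is the opcode OR'd with
 * SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP | SDMMC_CMD_RESP_CRC |
 * SDMMC_CMD_DAT_EXP, plus SDMMC_CMD_USE_HOLD_REG unless the slot set
 * DW_MMC_CARD_NO_USE_HOLD.
 */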
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    mmc_op_tuning(cmdr) ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here.  Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel.  ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs.  ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
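
/*
 * Worked example (illustrative numbers): with cto_clks = 255, CLKDIV = 1
 * (so cto_div = 2) and bus_hz = 100 MHz, the hardware timeout is
 *   DIV_ROUND_UP(1000 * 255 * 2, 100000000) = 1 ms,
 * and the timer is armed roughly 11 ms out once the 10 ms of spare time
 * is added.
 */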
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		queue_work(system_bh_wq, &host->bh_work);
	}
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
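
/*
 * Layout sketch (illustrative): after dw_mci_idmac_init() the descriptors
 * form a ring in host->sg_cpu whose "next descriptor" fields are already
 * chained, e.g. for a 3-entry ring:
 *
 *   desc[0].next -> sg_dma + 1 * sizeof(desc)
 *   desc[1].next -> sg_dma + 2 * sizeof(desc)
 *   desc[2]      -> IDMAC_DES0_ER set, "next" points back to sg_dma
 *
 * dw_mci_prepare_desc32()/dw_mci_prepare_desc64() below then only fill in
 * the OWN/FD/LD control bits, buffer sizes and buffer addresses for the
 * descriptors actually used by a transfer.
 */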
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
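
/*
 * Note (illustrative): these callbacks are reached through host->dma_ops,
 * e.g. host->dma_ops->start(host, sg_len) from dw_mci_submit_data_dma()
 * and host->dma_ops->cleanup(host) once a transfer finishes, so the same
 * code paths drive both the internal IDMAC above and the external
 * dmaengine-based implementation below.
 */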
771 static void dw_mci_edmac_stop_dma(struct dw_mci
*host
)
773 dmaengine_terminate_async(host
->dms
->ch
);
776 static int dw_mci_edmac_start_dma(struct dw_mci
*host
,
779 struct dma_slave_config cfg
;
780 struct dma_async_tx_descriptor
*desc
= NULL
;
781 struct scatterlist
*sgl
= host
->data
->sg
;
782 static const u32 mszs
[] = {1, 4, 8, 16, 32, 64, 128, 256};
783 u32 sg_elems
= host
->data
->sg_len
;
785 u32 fifo_offset
= host
->fifo_reg
- host
->regs
;
788 /* Set external dma config: burst size, burst width */
789 memset(&cfg
, 0, sizeof(cfg
));
790 cfg
.dst_addr
= host
->phy_regs
+ fifo_offset
;
791 cfg
.src_addr
= cfg
.dst_addr
;
792 cfg
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
793 cfg
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
795 /* Match burst msize with external dma config */
796 fifoth_val
= mci_readl(host
, FIFOTH
);
797 cfg
.dst_maxburst
= mszs
[(fifoth_val
>> 28) & 0x7];
798 cfg
.src_maxburst
= cfg
.dst_maxburst
;
800 if (host
->data
->flags
& MMC_DATA_WRITE
)
801 cfg
.direction
= DMA_MEM_TO_DEV
;
803 cfg
.direction
= DMA_DEV_TO_MEM
;
805 ret
= dmaengine_slave_config(host
->dms
->ch
, &cfg
);
807 dev_err(host
->dev
, "Failed to config edmac.\n");
811 desc
= dmaengine_prep_slave_sg(host
->dms
->ch
, sgl
,
812 sg_len
, cfg
.direction
,
813 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
815 dev_err(host
->dev
, "Can't prepare slave sg.\n");
819 /* Set dw_mci_dmac_complete_dma as callback */
820 desc
->callback
= dw_mci_dmac_complete_dma
;
821 desc
->callback_param
= (void *)host
;
822 dmaengine_submit(desc
);
824 /* Flush cache before write */
825 if (host
->data
->flags
& MMC_DATA_WRITE
)
826 dma_sync_sg_for_device(mmc_dev(host
->slot
->mmc
), sgl
,
827 sg_elems
, DMA_TO_DEVICE
);
829 dma_async_issue_pending(host
->dms
->ch
);
834 static int dw_mci_edmac_init(struct dw_mci
*host
)
836 /* Request external dma channel */
837 host
->dms
= kzalloc(sizeof(struct dw_mci_dma_slave
), GFP_KERNEL
);
841 host
->dms
->ch
= dma_request_chan(host
->dev
, "rx-tx");
842 if (IS_ERR(host
->dms
->ch
)) {
843 int ret
= PTR_ERR(host
->dms
->ch
);
845 dev_err(host
->dev
, "Failed to get external DMA channel.\n");
854 static void dw_mci_edmac_exit(struct dw_mci
*host
)
858 dma_release_channel(host
->dms
->ch
);
859 host
->dms
->ch
= NULL
;
866 static const struct dw_mci_dma_ops dw_mci_edmac_ops
= {
867 .init
= dw_mci_edmac_init
,
868 .exit
= dw_mci_edmac_exit
,
869 .start
= dw_mci_edmac_start_dma
,
870 .stop
= dw_mci_edmac_stop_dma
,
871 .complete
= dw_mci_dmac_complete_dma
,
872 .cleanup
= dw_mci_dma_cleanup
,
875 static int dw_mci_pre_dma_transfer(struct dw_mci
*host
,
876 struct mmc_data
*data
,
879 struct scatterlist
*sg
;
880 unsigned int i
, sg_len
;
882 if (data
->host_cookie
== COOKIE_PRE_MAPPED
)
886 * We don't do DMA on "complex" transfers, i.e. with
887 * non-word-aligned buffers or lengths. Also, we don't bother
888 * with all the DMA setup overhead for short transfers.
890 if (data
->blocks
* data
->blksz
< DW_MCI_DMA_THRESHOLD
)
896 for_each_sg(data
->sg
, sg
, data
->sg_len
, i
) {
897 if (sg
->offset
& 3 || sg
->length
& 3)
901 sg_len
= dma_map_sg(host
->dev
,
904 mmc_get_dma_dir(data
));
908 data
->host_cookie
= cookie
;
913 static void dw_mci_pre_req(struct mmc_host
*mmc
,
914 struct mmc_request
*mrq
)
916 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
917 struct mmc_data
*data
= mrq
->data
;
919 if (!slot
->host
->use_dma
|| !data
)
922 /* This data might be unmapped at this time */
923 data
->host_cookie
= COOKIE_UNMAPPED
;
925 if (dw_mci_pre_dma_transfer(slot
->host
, mrq
->data
,
926 COOKIE_PRE_MAPPED
) < 0)
927 data
->host_cookie
= COOKIE_UNMAPPED
;
930 static void dw_mci_post_req(struct mmc_host
*mmc
,
931 struct mmc_request
*mrq
,
934 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
935 struct mmc_data
*data
= mrq
->data
;
937 if (!slot
->host
->use_dma
|| !data
)
940 if (data
->host_cookie
!= COOKIE_UNMAPPED
)
941 dma_unmap_sg(slot
->host
->dev
,
944 mmc_get_dma_dir(data
));
945 data
->host_cookie
= COOKIE_UNMAPPED
;
948 static int dw_mci_get_cd(struct mmc_host
*mmc
)
951 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
952 struct dw_mci
*host
= slot
->host
;
953 int gpio_cd
= mmc_gpio_get_cd(mmc
);
955 /* Use platform get_cd function, else try onboard card detect */
956 if (((mmc
->caps
& MMC_CAP_NEEDS_POLL
)
957 || !mmc_card_is_removable(mmc
))) {
960 if (!test_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
)) {
961 if (mmc
->caps
& MMC_CAP_NEEDS_POLL
) {
962 dev_info(&mmc
->class_dev
,
963 "card is polling.\n");
965 dev_info(&mmc
->class_dev
,
966 "card is non-removable.\n");
968 set_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
);
972 } else if (gpio_cd
>= 0)
975 present
= (mci_readl(slot
->host
, CDETECT
) & (1 << slot
->id
))
978 spin_lock_bh(&host
->lock
);
979 if (present
&& !test_and_set_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
))
980 dev_dbg(&mmc
->class_dev
, "card is present\n");
982 !test_and_clear_bit(DW_MMC_CARD_PRESENT
, &slot
->flags
))
983 dev_dbg(&mmc
->class_dev
, "card is not present\n");
984 spin_unlock_bh(&host
->lock
);
989 static void dw_mci_adjust_fifoth(struct dw_mci
*host
, struct mmc_data
*data
)
991 unsigned int blksz
= data
->blksz
;
992 static const u32 mszs
[] = {1, 4, 8, 16, 32, 64, 128, 256};
993 u32 fifo_width
= 1 << host
->data_shift
;
994 u32 blksz_depth
= blksz
/ fifo_width
, fifoth_val
;
995 u32 msize
= 0, rx_wmark
= 1, tx_wmark
, tx_wmark_invers
;
996 int idx
= ARRAY_SIZE(mszs
) - 1;
998 /* pio should ship this scenario */
1002 tx_wmark
= (host
->fifo_depth
) / 2;
1003 tx_wmark_invers
= host
->fifo_depth
- tx_wmark
;
1007 * if blksz is not a multiple of the FIFO width
1009 if (blksz
% fifo_width
)
1013 if (!((blksz_depth
% mszs
[idx
]) ||
1014 (tx_wmark_invers
% mszs
[idx
]))) {
1016 rx_wmark
= mszs
[idx
] - 1;
1019 } while (--idx
> 0);
1021 * If idx is '0', it won't be tried
1022 * Thus, initial values are uesed
1025 fifoth_val
= SDMMC_SET_FIFOTH(msize
, rx_wmark
, tx_wmark
);
1026 mci_writel(host
, FIFOTH
, fifoth_val
);
1029 static void dw_mci_ctrl_thld(struct dw_mci
*host
, struct mmc_data
*data
)
1031 unsigned int blksz
= data
->blksz
;
1032 u32 blksz_depth
, fifo_depth
;
1037 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1038 * in the FIFO region, so we really shouldn't access it).
1040 if (host
->verid
< DW_MMC_240A
||
1041 (host
->verid
< DW_MMC_280A
&& data
->flags
& MMC_DATA_WRITE
))
1045 * Card write Threshold is introduced since 2.80a
1046 * It's used when HS400 mode is enabled.
1048 if (data
->flags
& MMC_DATA_WRITE
&&
1049 host
->timing
!= MMC_TIMING_MMC_HS400
)
1052 if (data
->flags
& MMC_DATA_WRITE
)
1053 enable
= SDMMC_CARD_WR_THR_EN
;
1055 enable
= SDMMC_CARD_RD_THR_EN
;
1057 if (host
->timing
!= MMC_TIMING_MMC_HS200
&&
1058 host
->timing
!= MMC_TIMING_UHS_SDR104
&&
1059 host
->timing
!= MMC_TIMING_MMC_HS400
)
1062 blksz_depth
= blksz
/ (1 << host
->data_shift
);
1063 fifo_depth
= host
->fifo_depth
;
1065 if (blksz_depth
> fifo_depth
)
1069 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1070 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
1071 * Currently just choose blksz.
1074 mci_writel(host
, CDTHRCTL
, SDMMC_SET_THLD(thld_size
, enable
));
1078 mci_writel(host
, CDTHRCTL
, 0);
1081 static int dw_mci_submit_data_dma(struct dw_mci
*host
, struct mmc_data
*data
)
1083 unsigned long irqflags
;
1087 host
->using_dma
= 0;
1089 /* If we don't have a channel, we can't do DMA */
1093 sg_len
= dw_mci_pre_dma_transfer(host
, data
, COOKIE_MAPPED
);
1095 host
->dma_ops
->stop(host
);
1099 host
->using_dma
= 1;
1101 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1103 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1104 (unsigned long)host
->sg_cpu
,
1105 (unsigned long)host
->sg_dma
,
1109 * Decide the MSIZE and RX/TX Watermark.
1110 * If current block size is same with previous size,
1111 * no need to update fifoth.
1113 if (host
->prev_blksz
!= data
->blksz
)
1114 dw_mci_adjust_fifoth(host
, data
);
1116 /* Enable the DMA interface */
1117 temp
= mci_readl(host
, CTRL
);
1118 temp
|= SDMMC_CTRL_DMA_ENABLE
;
1119 mci_writel(host
, CTRL
, temp
);
1121 /* Disable RX/TX IRQs, let DMA handle it */
1122 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1123 temp
= mci_readl(host
, INTMASK
);
1124 temp
&= ~(SDMMC_INT_RXDR
| SDMMC_INT_TXDR
);
1125 mci_writel(host
, INTMASK
, temp
);
1126 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1128 if (host
->dma_ops
->start(host
, sg_len
)) {
1129 host
->dma_ops
->stop(host
);
1130 /* We can't do DMA, try PIO for this one */
1132 "%s: fall back to PIO mode for current transfer\n",
1140 static void dw_mci_submit_data(struct dw_mci
*host
, struct mmc_data
*data
)
1142 unsigned long irqflags
;
1143 int flags
= SG_MITER_ATOMIC
;
1146 data
->error
= -EINPROGRESS
;
1148 WARN_ON(host
->data
);
1152 if (data
->flags
& MMC_DATA_READ
)
1153 host
->dir_status
= DW_MCI_RECV_STATUS
;
1155 host
->dir_status
= DW_MCI_SEND_STATUS
;
1157 dw_mci_ctrl_thld(host
, data
);
1159 if (dw_mci_submit_data_dma(host
, data
)) {
1160 if (host
->data
->flags
& MMC_DATA_READ
)
1161 flags
|= SG_MITER_TO_SG
;
1163 flags
|= SG_MITER_FROM_SG
;
1165 sg_miter_start(&host
->sg_miter
, data
->sg
, data
->sg_len
, flags
);
1166 host
->sg
= data
->sg
;
1167 host
->part_buf_start
= 0;
1168 host
->part_buf_count
= 0;
1170 mci_writel(host
, RINTSTS
, SDMMC_INT_TXDR
| SDMMC_INT_RXDR
);
1172 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1173 temp
= mci_readl(host
, INTMASK
);
1174 temp
|= SDMMC_INT_TXDR
| SDMMC_INT_RXDR
;
1175 mci_writel(host
, INTMASK
, temp
);
1176 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1178 temp
= mci_readl(host
, CTRL
);
1179 temp
&= ~SDMMC_CTRL_DMA_ENABLE
;
1180 mci_writel(host
, CTRL
, temp
);
1183 * Use the initial fifoth_val for PIO mode. If wm_algined
1184 * is set, we set watermark same as data size.
1185 * If next issued data may be transferred by DMA mode,
1186 * prev_blksz should be invalidated.
1188 if (host
->wm_aligned
)
1189 dw_mci_adjust_fifoth(host
, data
);
1191 mci_writel(host
, FIFOTH
, host
->fifoth_val
);
1192 host
->prev_blksz
= 0;
1195 * Keep the current block size.
1196 * It will be used to decide whether to update
1197 * fifoth register next time.
1199 host
->prev_blksz
= data
->blksz
;
1203 static void dw_mci_setup_bus(struct dw_mci_slot
*slot
, bool force_clkinit
)
1205 struct dw_mci
*host
= slot
->host
;
1206 unsigned int clock
= slot
->clock
;
1209 u32 sdmmc_cmd_bits
= SDMMC_CMD_UPD_CLK
| SDMMC_CMD_PRV_DAT_WAIT
;
1211 /* We must continue to set bit 28 in CMD until the change is complete */
1212 if (host
->state
== STATE_WAITING_CMD11_DONE
)
1213 sdmmc_cmd_bits
|= SDMMC_CMD_VOLT_SWITCH
;
1215 slot
->mmc
->actual_clock
= 0;
1218 mci_writel(host
, CLKENA
, 0);
1219 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1220 } else if (clock
!= host
->current_speed
|| force_clkinit
) {
1221 div
= host
->bus_hz
/ clock
;
1222 if (host
->bus_hz
% clock
&& host
->bus_hz
> clock
)
1224 * move the + 1 after the divide to prevent
1225 * over-clocking the card.
1229 div
= (host
->bus_hz
!= clock
) ? DIV_ROUND_UP(div
, 2) : 0;
1231 if ((clock
!= slot
->__clk_old
&&
1232 !test_bit(DW_MMC_CARD_NEEDS_POLL
, &slot
->flags
)) ||
1234 /* Silent the verbose log if calling from PM context */
1236 dev_info(&slot
->mmc
->class_dev
,
1237 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1238 slot
->id
, host
->bus_hz
, clock
,
1239 div
? ((host
->bus_hz
/ div
) >> 1) :
1243 * If card is polling, display the message only
1244 * one time at boot time.
1246 if (slot
->mmc
->caps
& MMC_CAP_NEEDS_POLL
&&
1247 slot
->mmc
->f_min
== clock
)
1248 set_bit(DW_MMC_CARD_NEEDS_POLL
, &slot
->flags
);
1252 mci_writel(host
, CLKENA
, 0);
1253 mci_writel(host
, CLKSRC
, 0);
1256 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1258 /* set clock to desired speed */
1259 mci_writel(host
, CLKDIV
, div
);
1262 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1264 /* enable clock; only low power if no SDIO */
1265 clk_en_a
= SDMMC_CLKEN_ENABLE
<< slot
->id
;
1266 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
))
1267 clk_en_a
|= SDMMC_CLKEN_LOW_PWR
<< slot
->id
;
1268 mci_writel(host
, CLKENA
, clk_en_a
);
1271 mci_send_cmd(slot
, sdmmc_cmd_bits
, 0);
1273 /* keep the last clock value that was requested from core */
1274 slot
->__clk_old
= clock
;
1275 slot
->mmc
->actual_clock
= div
? ((host
->bus_hz
/ div
) >> 1) :
1279 host
->current_speed
= clock
;
1281 /* Set the current slot bus width */
1282 mci_writel(host
, CTYPE
, (slot
->ctype
<< slot
->id
));
1285 static void dw_mci_set_data_timeout(struct dw_mci
*host
,
1286 unsigned int timeout_ns
)
1288 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1292 if (drv_data
&& drv_data
->set_data_timeout
)
1293 return drv_data
->set_data_timeout(host
, timeout_ns
);
1295 clk_div
= (mci_readl(host
, CLKDIV
) & 0xFF) * 2;
1299 tmp
= DIV_ROUND_UP_ULL((u64
)timeout_ns
* host
->bus_hz
, NSEC_PER_SEC
);
1300 tmp
= DIV_ROUND_UP_ULL(tmp
, clk_div
);
1302 /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
1303 tmout
= 0xFF; /* Set maximum */
1305 /* TMOUT[31:8] (DATA_TIMEOUT) */
1306 if (!tmp
|| tmp
> 0xFFFFFF)
1307 tmout
|= (0xFFFFFF << 8);
1309 tmout
|= (tmp
& 0xFFFFFF) << 8;
1311 mci_writel(host
, TMOUT
, tmout
);
1312 dev_dbg(host
->dev
, "timeout_ns: %u => TMOUT[31:8]: %#08x",
1313 timeout_ns
, tmout
>> 8);
1316 static void __dw_mci_start_request(struct dw_mci
*host
,
1317 struct dw_mci_slot
*slot
,
1318 struct mmc_command
*cmd
)
1320 struct mmc_request
*mrq
;
1321 struct mmc_data
*data
;
1328 host
->pending_events
= 0;
1329 host
->completed_events
= 0;
1330 host
->cmd_status
= 0;
1331 host
->data_status
= 0;
1332 host
->dir_status
= 0;
1336 dw_mci_set_data_timeout(host
, data
->timeout_ns
);
1337 mci_writel(host
, BYTCNT
, data
->blksz
*data
->blocks
);
1338 mci_writel(host
, BLKSIZ
, data
->blksz
);
1341 cmdflags
= dw_mci_prepare_command(slot
->mmc
, cmd
);
1343 /* this is the first command, send the initialization clock */
1344 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT
, &slot
->flags
))
1345 cmdflags
|= SDMMC_CMD_INIT
;
1348 dw_mci_submit_data(host
, data
);
1349 wmb(); /* drain writebuffer */
1352 dw_mci_start_command(host
, cmd
, cmdflags
);
1354 if (cmd
->opcode
== SD_SWITCH_VOLTAGE
) {
1355 unsigned long irqflags
;
1358 * Databook says to fail after 2ms w/ no response, but evidence
1359 * shows that sometimes the cmd11 interrupt takes over 130ms.
1360 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1361 * is just about to roll over.
1363 * We do this whole thing under spinlock and only if the
1364 * command hasn't already completed (indicating the irq
1365 * already ran so we don't want the timeout).
1367 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1368 if (!test_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
))
1369 mod_timer(&host
->cmd11_timer
,
1370 jiffies
+ msecs_to_jiffies(500) + 1);
1371 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1374 host
->stop_cmdr
= dw_mci_prep_stop_abort(host
, cmd
);
1377 static void dw_mci_start_request(struct dw_mci
*host
,
1378 struct dw_mci_slot
*slot
)
1380 struct mmc_request
*mrq
= slot
->mrq
;
1381 struct mmc_command
*cmd
;
1383 cmd
= mrq
->sbc
? mrq
->sbc
: mrq
->cmd
;
1384 __dw_mci_start_request(host
, slot
, cmd
);
1387 /* must be called with host->lock held */
1388 static void dw_mci_queue_request(struct dw_mci
*host
, struct dw_mci_slot
*slot
,
1389 struct mmc_request
*mrq
)
1391 dev_vdbg(&slot
->mmc
->class_dev
, "queue request: state=%d\n",
1396 if (host
->state
== STATE_WAITING_CMD11_DONE
) {
1397 dev_warn(&slot
->mmc
->class_dev
,
1398 "Voltage change didn't complete\n");
1400 * this case isn't expected to happen, so we can
1401 * either crash here or just try to continue on
1402 * in the closest possible state
1404 host
->state
= STATE_IDLE
;
1407 if (host
->state
== STATE_IDLE
) {
1408 host
->state
= STATE_SENDING_CMD
;
1409 dw_mci_start_request(host
, slot
);
1411 list_add_tail(&slot
->queue_node
, &host
->queue
);
1415 static void dw_mci_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
1417 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1418 struct dw_mci
*host
= slot
->host
;
1423 * The check for card presence and queueing of the request must be
1424 * atomic, otherwise the card could be removed in between and the
1425 * request wouldn't fail until another card was inserted.
1428 if (!dw_mci_get_cd(mmc
)) {
1429 mrq
->cmd
->error
= -ENOMEDIUM
;
1430 mmc_request_done(mmc
, mrq
);
1434 spin_lock_bh(&host
->lock
);
1436 dw_mci_queue_request(host
, slot
, mrq
);
1438 spin_unlock_bh(&host
->lock
);
1441 static void dw_mci_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1443 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1444 const struct dw_mci_drv_data
*drv_data
= slot
->host
->drv_data
;
1448 switch (ios
->bus_width
) {
1449 case MMC_BUS_WIDTH_4
:
1450 slot
->ctype
= SDMMC_CTYPE_4BIT
;
1452 case MMC_BUS_WIDTH_8
:
1453 slot
->ctype
= SDMMC_CTYPE_8BIT
;
1456 /* set default 1 bit mode */
1457 slot
->ctype
= SDMMC_CTYPE_1BIT
;
1460 regs
= mci_readl(slot
->host
, UHS_REG
);
1463 if (ios
->timing
== MMC_TIMING_MMC_DDR52
||
1464 ios
->timing
== MMC_TIMING_UHS_DDR50
||
1465 ios
->timing
== MMC_TIMING_MMC_HS400
)
1466 regs
|= ((0x1 << slot
->id
) << 16);
1468 regs
&= ~((0x1 << slot
->id
) << 16);
1470 mci_writel(slot
->host
, UHS_REG
, regs
);
1471 slot
->host
->timing
= ios
->timing
;
1474 * Use mirror of ios->clock to prevent race with mmc
1475 * core ios update when finding the minimum.
1477 slot
->clock
= ios
->clock
;
1479 if (drv_data
&& drv_data
->set_ios
)
1480 drv_data
->set_ios(slot
->host
, ios
);
1482 switch (ios
->power_mode
) {
1484 if (!IS_ERR(mmc
->supply
.vmmc
)) {
1485 ret
= mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
,
1488 dev_err(slot
->host
->dev
,
1489 "failed to enable vmmc regulator\n");
1490 /*return, if failed turn on vmmc*/
1494 set_bit(DW_MMC_CARD_NEED_INIT
, &slot
->flags
);
1495 regs
= mci_readl(slot
->host
, PWREN
);
1496 regs
|= (1 << slot
->id
);
1497 mci_writel(slot
->host
, PWREN
, regs
);
1500 if (!slot
->host
->vqmmc_enabled
) {
1501 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1502 ret
= regulator_enable(mmc
->supply
.vqmmc
);
1504 dev_err(slot
->host
->dev
,
1505 "failed to enable vqmmc\n");
1507 slot
->host
->vqmmc_enabled
= true;
1510 /* Keep track so we don't reset again */
1511 slot
->host
->vqmmc_enabled
= true;
1514 /* Reset our state machine after powering on */
1515 dw_mci_ctrl_reset(slot
->host
,
1516 SDMMC_CTRL_ALL_RESET_FLAGS
);
1519 /* Adjust clock / bus width after power is up */
1520 dw_mci_setup_bus(slot
, false);
1524 /* Turn clock off before power goes down */
1525 dw_mci_setup_bus(slot
, false);
1527 if (!IS_ERR(mmc
->supply
.vmmc
))
1528 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, 0);
1530 if (!IS_ERR(mmc
->supply
.vqmmc
) && slot
->host
->vqmmc_enabled
)
1531 regulator_disable(mmc
->supply
.vqmmc
);
1532 slot
->host
->vqmmc_enabled
= false;
1534 regs
= mci_readl(slot
->host
, PWREN
);
1535 regs
&= ~(1 << slot
->id
);
1536 mci_writel(slot
->host
, PWREN
, regs
);
1542 if (slot
->host
->state
== STATE_WAITING_CMD11_DONE
&& ios
->clock
!= 0)
1543 slot
->host
->state
= STATE_IDLE
;
1546 static int dw_mci_card_busy(struct mmc_host
*mmc
)
1548 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1552 * Check the busy bit which is low when DAT[3:0]
1553 * (the data lines) are 0000
1555 status
= mci_readl(slot
->host
, STATUS
);
1557 return !!(status
& SDMMC_STATUS_BUSY
);
1560 static int dw_mci_switch_voltage(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
1562 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1563 struct dw_mci
*host
= slot
->host
;
1564 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1566 u32 v18
= SDMMC_UHS_18V
<< slot
->id
;
1569 if (drv_data
&& drv_data
->switch_voltage
)
1570 return drv_data
->switch_voltage(mmc
, ios
);
1573 * Program the voltage. Note that some instances of dw_mmc may use
1574 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1575 * does no harm but you need to set the regulator directly. Try both.
1577 uhs
= mci_readl(host
, UHS_REG
);
1578 if (ios
->signal_voltage
== MMC_SIGNAL_VOLTAGE_330
)
1583 if (!IS_ERR(mmc
->supply
.vqmmc
)) {
1584 ret
= mmc_regulator_set_vqmmc(mmc
, ios
);
1586 dev_dbg(&mmc
->class_dev
,
1587 "Regulator set error %d - %s V\n",
1588 ret
, uhs
& v18
? "1.8" : "3.3");
1592 mci_writel(host
, UHS_REG
, uhs
);
1597 static int dw_mci_get_ro(struct mmc_host
*mmc
)
1600 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1601 int gpio_ro
= mmc_gpio_get_ro(mmc
);
1603 /* Use platform get_ro function, else try on board write protect */
1605 read_only
= gpio_ro
;
1608 mci_readl(slot
->host
, WRTPRT
) & (1 << slot
->id
) ? 1 : 0;
1610 dev_dbg(&mmc
->class_dev
, "card is %s\n",
1611 read_only
? "read-only" : "read-write");
1616 static void dw_mci_hw_reset(struct mmc_host
*mmc
)
1618 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1619 struct dw_mci
*host
= slot
->host
;
1620 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1623 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1624 dw_mci_idmac_reset(host
);
1626 if (!dw_mci_ctrl_reset(host
, SDMMC_CTRL_DMA_RESET
|
1627 SDMMC_CTRL_FIFO_RESET
))
1630 if (drv_data
&& drv_data
->hw_reset
) {
1631 drv_data
->hw_reset(host
);
1636 * According to eMMC spec, card reset procedure:
1637 * tRstW >= 1us: RST_n pulse width
1638 * tRSCA >= 200us: RST_n to Command time
1639 * tRSTH >= 1us: RST_n high period
1641 reset
= mci_readl(host
, RST_N
);
1642 reset
&= ~(SDMMC_RST_HWACTIVE
<< slot
->id
);
1643 mci_writel(host
, RST_N
, reset
);
1645 reset
|= SDMMC_RST_HWACTIVE
<< slot
->id
;
1646 mci_writel(host
, RST_N
, reset
);
1647 usleep_range(200, 300);
1650 static void dw_mci_prepare_sdio_irq(struct dw_mci_slot
*slot
, bool prepare
)
1652 struct dw_mci
*host
= slot
->host
;
1653 const u32 clken_low_pwr
= SDMMC_CLKEN_LOW_PWR
<< slot
->id
;
1658 * Low power mode will stop the card clock when idle. According to the
1659 * description of the CLKENA register we should disable low power mode
1660 * for SDIO cards if we need SDIO interrupts to work.
1663 clk_en_a_old
= mci_readl(host
, CLKENA
);
1665 set_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
);
1666 clk_en_a
= clk_en_a_old
& ~clken_low_pwr
;
1668 clear_bit(DW_MMC_CARD_NO_LOW_PWR
, &slot
->flags
);
1669 clk_en_a
= clk_en_a_old
| clken_low_pwr
;
1672 if (clk_en_a
!= clk_en_a_old
) {
1673 mci_writel(host
, CLKENA
, clk_en_a
);
1674 mci_send_cmd(slot
, SDMMC_CMD_UPD_CLK
| SDMMC_CMD_PRV_DAT_WAIT
,
1679 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot
*slot
, int enb
)
1681 struct dw_mci
*host
= slot
->host
;
1682 unsigned long irqflags
;
1685 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
1687 /* Enable/disable Slot Specific SDIO interrupt */
1688 int_mask
= mci_readl(host
, INTMASK
);
1690 int_mask
|= SDMMC_INT_SDIO(slot
->sdio_id
);
1692 int_mask
&= ~SDMMC_INT_SDIO(slot
->sdio_id
);
1693 mci_writel(host
, INTMASK
, int_mask
);
1695 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
1698 static void dw_mci_enable_sdio_irq(struct mmc_host
*mmc
, int enb
)
1700 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1701 struct dw_mci
*host
= slot
->host
;
1703 dw_mci_prepare_sdio_irq(slot
, enb
);
1704 __dw_mci_enable_sdio_irq(slot
, enb
);
1706 /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1708 pm_runtime_get_noresume(host
->dev
);
1710 pm_runtime_put_noidle(host
->dev
);
1713 static void dw_mci_ack_sdio_irq(struct mmc_host
*mmc
)
1715 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1717 __dw_mci_enable_sdio_irq(slot
, 1);
1720 static int dw_mci_execute_tuning(struct mmc_host
*mmc
, u32 opcode
)
1722 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1723 struct dw_mci
*host
= slot
->host
;
1724 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1727 if (drv_data
&& drv_data
->execute_tuning
)
1728 err
= drv_data
->execute_tuning(slot
, opcode
);
1732 static int dw_mci_prepare_hs400_tuning(struct mmc_host
*mmc
,
1733 struct mmc_ios
*ios
)
1735 struct dw_mci_slot
*slot
= mmc_priv(mmc
);
1736 struct dw_mci
*host
= slot
->host
;
1737 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
1739 if (drv_data
&& drv_data
->prepare_hs400_tuning
)
1740 return drv_data
->prepare_hs400_tuning(host
, ios
);
1745 static bool dw_mci_reset(struct dw_mci
*host
)
1747 u32 flags
= SDMMC_CTRL_RESET
| SDMMC_CTRL_FIFO_RESET
;
1752 * Resetting generates a block interrupt, hence setting
1753 * the scatter-gather pointer to NULL.
1756 sg_miter_stop(&host
->sg_miter
);
1761 flags
|= SDMMC_CTRL_DMA_RESET
;
1763 if (dw_mci_ctrl_reset(host
, flags
)) {
1765 * In all cases we clear the RAWINTS
1766 * register to clear any interrupts.
1768 mci_writel(host
, RINTSTS
, 0xFFFFFFFF);
1770 if (!host
->use_dma
) {
1775 /* Wait for dma_req to be cleared */
1776 if (readl_poll_timeout_atomic(host
->regs
+ SDMMC_STATUS
,
1778 !(status
& SDMMC_STATUS_DMA_REQ
),
1779 1, 500 * USEC_PER_MSEC
)) {
1781 "%s: Timeout waiting for dma_req to be cleared\n",
1786 /* when using DMA next we reset the fifo again */
1787 if (!dw_mci_ctrl_reset(host
, SDMMC_CTRL_FIFO_RESET
))
1790 /* if the controller reset bit did clear, then set clock regs */
1791 if (!(mci_readl(host
, CTRL
) & SDMMC_CTRL_RESET
)) {
1793 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1799 if (host
->use_dma
== TRANS_MODE_IDMAC
)
1800 /* It is also required that we reinit idmac */
1801 dw_mci_idmac_init(host
);
1806 /* After a CTRL reset we need to have CIU set clock registers */
1807 mci_send_cmd(host
->slot
, SDMMC_CMD_UPD_CLK
, 0);
1812 static const struct mmc_host_ops dw_mci_ops
= {
1813 .request
= dw_mci_request
,
1814 .pre_req
= dw_mci_pre_req
,
1815 .post_req
= dw_mci_post_req
,
1816 .set_ios
= dw_mci_set_ios
,
1817 .get_ro
= dw_mci_get_ro
,
1818 .get_cd
= dw_mci_get_cd
,
1819 .card_hw_reset
= dw_mci_hw_reset
,
1820 .enable_sdio_irq
= dw_mci_enable_sdio_irq
,
1821 .ack_sdio_irq
= dw_mci_ack_sdio_irq
,
1822 .execute_tuning
= dw_mci_execute_tuning
,
1823 .card_busy
= dw_mci_card_busy
,
1824 .start_signal_voltage_switch
= dw_mci_switch_voltage
,
1825 .prepare_hs400_tuning
= dw_mci_prepare_hs400_tuning
,
1828 #ifdef CONFIG_FAULT_INJECTION
1829 static enum hrtimer_restart
dw_mci_fault_timer(struct hrtimer
*t
)
1831 struct dw_mci
*host
= container_of(t
, struct dw_mci
, fault_timer
);
1832 unsigned long flags
;
1834 spin_lock_irqsave(&host
->irq_lock
, flags
);
1837 * Only inject an error if we haven't already got an error or data over
1840 if (!host
->data_status
) {
1841 host
->data_status
= SDMMC_INT_DCRC
;
1842 set_bit(EVENT_DATA_ERROR
, &host
->pending_events
);
1843 queue_work(system_bh_wq
, &host
->bh_work
);
1846 spin_unlock_irqrestore(&host
->irq_lock
, flags
);
1848 return HRTIMER_NORESTART
;
1851 static void dw_mci_start_fault_timer(struct dw_mci
*host
)
1853 struct mmc_data
*data
= host
->data
;
1855 if (!data
|| data
->blocks
<= 1)
1858 if (!should_fail(&host
->fail_data_crc
, 1))
1862 * Try to inject the error at random points during the data transfer.
1864 hrtimer_start(&host
->fault_timer
,
1865 ms_to_ktime(get_random_u32_below(25)),
1869 static void dw_mci_stop_fault_timer(struct dw_mci
*host
)
1871 hrtimer_cancel(&host
->fault_timer
);
1874 static void dw_mci_init_fault(struct dw_mci
*host
)
1876 host
->fail_data_crc
= (struct fault_attr
) FAULT_ATTR_INITIALIZER
;
1878 hrtimer_init(&host
->fault_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
1879 host
->fault_timer
.function
= dw_mci_fault_timer
;
1882 static void dw_mci_init_fault(struct dw_mci
*host
)
1886 static void dw_mci_start_fault_timer(struct dw_mci
*host
)
1890 static void dw_mci_stop_fault_timer(struct dw_mci
*host
)
1895 static void dw_mci_request_end(struct dw_mci
*host
, struct mmc_request
*mrq
)
1896 __releases(&host
->lock
)
1897 __acquires(&host
->lock
)
1899 struct dw_mci_slot
*slot
;
1900 struct mmc_host
*prev_mmc
= host
->slot
->mmc
;
1902 WARN_ON(host
->cmd
|| host
->data
);
1904 host
->slot
->mrq
= NULL
;
1906 if (!list_empty(&host
->queue
)) {
1907 slot
= list_entry(host
->queue
.next
,
1908 struct dw_mci_slot
, queue_node
);
1909 list_del(&slot
->queue_node
);
1910 dev_vdbg(host
->dev
, "list not empty: %s is next\n",
1911 mmc_hostname(slot
->mmc
));
1912 host
->state
= STATE_SENDING_CMD
;
1913 dw_mci_start_request(host
, slot
);
1915 dev_vdbg(host
->dev
, "list empty\n");
1917 if (host
->state
== STATE_SENDING_CMD11
)
1918 host
->state
= STATE_WAITING_CMD11_DONE
;
1920 host
->state
= STATE_IDLE
;
1923 spin_unlock(&host
->lock
);
1924 mmc_request_done(prev_mmc
, mrq
);
1925 spin_lock(&host
->lock
);
1928 static int dw_mci_command_complete(struct dw_mci
*host
, struct mmc_command
*cmd
)
1930 u32 status
= host
->cmd_status
;
1932 host
->cmd_status
= 0;
1934 /* Read the response from the card (up to 16 bytes) */
1935 if (cmd
->flags
& MMC_RSP_PRESENT
) {
1936 if (cmd
->flags
& MMC_RSP_136
) {
1937 cmd
->resp
[3] = mci_readl(host
, RESP0
);
1938 cmd
->resp
[2] = mci_readl(host
, RESP1
);
1939 cmd
->resp
[1] = mci_readl(host
, RESP2
);
1940 cmd
->resp
[0] = mci_readl(host
, RESP3
);
1942 cmd
->resp
[0] = mci_readl(host
, RESP0
);
1949 if (status
& SDMMC_INT_RTO
)
1950 cmd
->error
= -ETIMEDOUT
;
1951 else if ((cmd
->flags
& MMC_RSP_CRC
) && (status
& SDMMC_INT_RCRC
))
1952 cmd
->error
= -EILSEQ
;
1953 else if (status
& SDMMC_INT_RESP_ERR
)
1961 static int dw_mci_data_complete(struct dw_mci
*host
, struct mmc_data
*data
)
1963 u32 status
= host
->data_status
;
1965 if (status
& DW_MCI_DATA_ERROR_FLAGS
) {
1966 if (status
& SDMMC_INT_DRTO
) {
1967 data
->error
= -ETIMEDOUT
;
1968 } else if (status
& SDMMC_INT_DCRC
) {
1969 data
->error
= -EILSEQ
;
1970 } else if (status
& SDMMC_INT_EBE
) {
1971 if (host
->dir_status
==
1972 DW_MCI_SEND_STATUS
) {
1974 * No data CRC status was returned.
1975 * The number of bytes transferred
1976 * will be exaggerated in PIO mode.
1978 data
->bytes_xfered
= 0;
1979 data
->error
= -ETIMEDOUT
;
1980 } else if (host
->dir_status
==
1981 DW_MCI_RECV_STATUS
) {
1982 data
->error
= -EILSEQ
;
1985 /* SDMMC_INT_SBE is included */
1986 data
->error
= -EILSEQ
;
1989 dev_dbg(host
->dev
, "data error, status 0x%08x\n", status
);
1992 * After an error, there may be data lingering
1997 data
->bytes_xfered
= data
->blocks
* data
->blksz
;
2004 static void dw_mci_set_drto(struct dw_mci
*host
)
2006 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
2007 unsigned int drto_clks
;
2008 unsigned int drto_div
;
2009 unsigned int drto_ms
;
2010 unsigned long irqflags
;
2012 if (drv_data
&& drv_data
->get_drto_clks
)
2013 drto_clks
= drv_data
->get_drto_clks(host
);
2015 drto_clks
= mci_readl(host
, TMOUT
) >> 8;
2016 drto_div
= (mci_readl(host
, CLKDIV
) & 0xff) * 2;
2020 drto_ms
= DIV_ROUND_UP_ULL((u64
)MSEC_PER_SEC
* drto_clks
* drto_div
,
2023 dev_dbg(host
->dev
, "drto_ms: %u\n", drto_ms
);
2025 /* add a bit spare time */
2028 spin_lock_irqsave(&host
->irq_lock
, irqflags
);
2029 if (!test_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
))
2030 mod_timer(&host
->dto_timer
,
2031 jiffies
+ msecs_to_jiffies(drto_ms
));
2032 spin_unlock_irqrestore(&host
->irq_lock
, irqflags
);
2035 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci
*host
)
2037 if (!test_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
))
2041 * Really be certain that the timer has stopped. This is a bit of
2042 * paranoia and could only really happen if we had really bad
2043 * interrupt latency and the interrupt routine and timeout were
2044 * running concurrently so that the del_timer() in the interrupt
2045 * handler couldn't run.
2047 WARN_ON(del_timer_sync(&host
->cto_timer
));
2048 clear_bit(EVENT_CMD_COMPLETE
, &host
->pending_events
);
2053 static bool dw_mci_clear_pending_data_complete(struct dw_mci
*host
)
2055 if (!test_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
))
2058 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2059 WARN_ON(del_timer_sync(&host
->dto_timer
));
2060 clear_bit(EVENT_DATA_COMPLETE
, &host
->pending_events
);
2065 static void dw_mci_work_func(struct work_struct
*t
)
2067 struct dw_mci
*host
= from_work(host
, t
, bh_work
);
2068 struct mmc_data
*data
;
2069 struct mmc_command
*cmd
;
2070 struct mmc_request
*mrq
;
2071 enum dw_mci_state state
;
2072 enum dw_mci_state prev_state
;
2075 spin_lock(&host
->lock
);
2077 state
= host
->state
;
2086 case STATE_WAITING_CMD11_DONE
:
2089 case STATE_SENDING_CMD11
:
2090 case STATE_SENDING_CMD
:
2091 if (!dw_mci_clear_pending_cmd_complete(host
))
2096 set_bit(EVENT_CMD_COMPLETE
, &host
->completed_events
);
2097 err
= dw_mci_command_complete(host
, cmd
);
2098 if (cmd
== mrq
->sbc
&& !err
) {
2099 __dw_mci_start_request(host
, host
->slot
,
2104 if (cmd
->data
&& err
) {
2106 * During UHS tuning sequence, sending the stop
2107 * command after the response CRC error would
2108 * throw the system into a confused state
2109 * causing all future tuning phases to report
2112 * In such case controller will move into a data
2113 * transfer state after a response error or
2114 * response CRC error. Let's let that finish
2115 * before trying to send a stop, so we'll go to
2116 * STATE_SENDING_DATA.
2118 * Although letting the data transfer take place
2119 * will waste a bit of time (we already know
2120 * the command was bad), it can't cause any
2121 * errors since it's possible it would have
2122 * taken place anyway if this bh work got
2123 * delayed. Allowing the transfer to take place
			 * avoids races and keeps things simple.
			 */
			if (err != -ETIMEDOUT &&
			    host->dir_status == DW_MCI_RECV_STATUS) {
				state = STATE_SENDING_DATA;
				continue;
			}

			send_stop_abort(host, data);
			dw_mci_stop_dma(host);
			state = STATE_SENDING_STOP;
			break;
		}

		if (!cmd->data || err) {
			dw_mci_request_end(host, mrq);
			goto unlock;
		}

		prev_state = state = STATE_SENDING_DATA;

	case STATE_SENDING_DATA:
		/*
		 * We could get a data error and never a transfer
		 * complete so we'd better check for it here.
		 *
		 * Note that we don't really care if we also got a
		 * transfer complete; stopping the DMA and sending an
		 * abort won't hurt.
		 */
		if (test_and_clear_bit(EVENT_DATA_ERROR,
				       &host->pending_events)) {
			if (!(host->data_status & (SDMMC_INT_DRTO |
						   SDMMC_INT_EBE)))
				send_stop_abort(host, data);
			dw_mci_stop_dma(host);
			state = STATE_DATA_ERROR;
			break;
		}

		if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
					&host->pending_events)) {
			/*
			 * If all data-related interrupts don't come
			 * within the given time in reading data state.
			 */
			if (host->dir_status == DW_MCI_RECV_STATUS)
				dw_mci_set_drto(host);
			break;
		}

		set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

		/*
		 * Handle an EVENT_DATA_ERROR that might have shown up
		 * before the transfer completed.  This might not have
		 * been caught by the check above because the interrupt
		 * could have gone off between the previous check and
		 * the check for transfer complete.
		 *
		 * Technically this ought not be needed assuming we
		 * get a DATA_COMPLETE eventually (we'll notice the
		 * error and end the request), but it shouldn't hurt.
		 *
		 * This has the advantage of sending the stop command.
		 */
		if (test_and_clear_bit(EVENT_DATA_ERROR,
				       &host->pending_events)) {
			if (!(host->data_status & (SDMMC_INT_DRTO |
						   SDMMC_INT_EBE)))
				send_stop_abort(host, data);
			dw_mci_stop_dma(host);
			state = STATE_DATA_ERROR;
			break;
		}
		prev_state = state = STATE_DATA_BUSY;

	case STATE_DATA_BUSY:
		if (!dw_mci_clear_pending_data_complete(host)) {
			/*
			 * If data error interrupt comes but data over
			 * interrupt doesn't come within the given time
			 * in reading data state.
			 */
			if (host->dir_status == DW_MCI_RECV_STATUS)
				dw_mci_set_drto(host);
			break;
		}

		dw_mci_stop_fault_timer(host);
		host->data = NULL;
		set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
		err = dw_mci_data_complete(host, data);

		if (!err) {
			if (!data->stop || mrq->sbc) {
				if (mrq->sbc && data->stop)
					data->stop->error = 0;
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			/* stop command for open-ended transfer*/
			if (data->stop)
				send_stop_abort(host, data);
		} else {
			/*
			 * If we don't have a command complete now we'll
			 * never get one since we just reset everything;
			 * better end the request.
			 *
			 * If we do have a command complete we'll fall
			 * through to the SENDING_STOP command and
			 * everything will be peachy keen.
			 */
			if (!test_bit(EVENT_CMD_COMPLETE,
				      &host->pending_events)) {
				host->cmd = NULL;
				dw_mci_request_end(host, mrq);
				goto unlock;
			}
		}

		/*
		 * If err has non-zero,
		 * stop-abort command has been already issued.
		 */
		prev_state = state = STATE_SENDING_STOP;

	case STATE_SENDING_STOP:
		if (!dw_mci_clear_pending_cmd_complete(host))
			break;

		/* CMD error in data command */
		if (mrq->cmd->error && mrq->data)
			dw_mci_reset(host);

		dw_mci_stop_fault_timer(host);
		host->cmd = NULL;
		host->data = NULL;

		if (!mrq->sbc && mrq->stop)
			dw_mci_command_complete(host, mrq->stop);
		else
			host->cmd_status = 0;

		dw_mci_request_end(host, mrq);
		goto unlock;

	case STATE_DATA_ERROR:
		if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
					&host->pending_events))
			break;

		state = STATE_DATA_BUSY;
		break;
	}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
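/*
 * PIO staging helpers.  The data FIFO is always accessed in full
 * host-data-width words (2, 4 or 8 bytes depending on HCON), so byte counts
 * that are not word aligned leave a remainder in host->part_buf.  The
 * helpers below stage and drain that partial word for the push (write) and
 * pull (read) paths.
 */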
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
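/*
 * Width-specific FIFO accessors.  One push/pull pair exists per possible
 * host data width (16/32/64 bit); dw_mci_probe() reads HCON and installs
 * the matching pair in host->push_data/host->pull_data.  On architectures
 * without efficient unaligned access, data is bounced through a small
 * aligned buffer before the FIFO register is touched.
 */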
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
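/*
 * PIO drain/fill paths.  Both walk the request's scatterlist with an
 * sg_mapping_iter and move as many bytes per iteration as the FIFO count
 * (plus any partial word in part_buf) allows, looping while RXDR/TXDR stays
 * asserted, and flag EVENT_XFER_COMPLETE once the scatterlist is exhausted.
 */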
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
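/*
 * Command-done handling from hard-IRQ context: record the raw status once,
 * mark EVENT_CMD_COMPLETE and defer the rest of the request processing to
 * the bh_work bottom half.
 */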
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	queue_work(system_bh_wq, &host->bh_work);

	dw_mci_start_fault_timer(host);
}
static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	mmc_detect_change(slot->mmc,
		msecs_to_jiffies(host->pdata->detect_delay_ms));
}
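/*
 * Top-half interrupt handler.  Each pending source is acknowledged in
 * RINTSTS, mirrored into host->{cmd,data}_status and the pending_events
 * bitmap, and heavier processing is deferred to bh_work; PIO FIFO service,
 * card-detect notification and IDMAC completion are handled inline.
 */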
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock(&host->irq_lock);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock(&host->irq_lock);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock(&host->irq_lock);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				del_timer(&host->dto_timer);

			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				/* In case of error, we cannot expect a DTO */
				set_bit(EVENT_DATA_COMPLETE,
					&host->pending_events);

			queue_work(system_bh_wq, &host->bh_work);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock(&host->irq_lock);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			queue_work(system_bh_wq, &host->bh_work);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock(&host->irq_lock);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}

	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (drv_data)
		mmc->caps |= drv_data->common_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	/* if host has set a minimum_freq, we should respect it */
	if (host->minimum_speed)
		mmc->f_min = host->minimum_speed;
	else
		mmc->f_min = DW_MCI_FREQ_MIN;

	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}
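/*
 * Slot setup: allocate the mmc_host, wire up regulators and DT properties,
 * and derive the request-size limits from the active transfer mode (IDMAC,
 * EDMAC or PIO) before registering the slot with the MMC core.
 */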
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}
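/*
 * DMA setup: the transfer mode advertised in HCON decides between the
 * internal DMAC (IDMAC), an external DMA engine (EDMAC) or PIO; any failure
 * along the way falls back to PIO rather than failing the probe.
 */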
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check tansfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_string_array_count(dev, "dma-names") < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
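/*
 * Software timeout timers: cmd11_timer bounds the voltage-switch handshake,
 * cto_timer the command phase and dto_timer the data phase.  Each handler
 * only flags the corresponding events and kicks bh_work; nothing is
 * cancelled in the controller itself.
 */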
static void dw_mci_cmd11_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cmd11_timer);

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	queue_work(system_bh_wq, &host->bh_work);
}
static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running.  Let's be paranoid
	 * and detect those two cases.  Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller--we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If CMD_DONE interrupt does NOT come in sending command
		 * state, we should notify the driver to terminate current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		queue_work(system_bh_wq, &host->bh_work);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		queue_work(system_bh_wq, &host->bh_work);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc))
		return ERR_CAST(pdata->rstc);

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;

	/*
	 * No need for CD if all slots have a non-error GPIO
	 * as well as broken card detection is found.
	 */
	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}
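/*
 * Probe sequence: parse platform data/DT, bring up the biu/ciu clocks,
 * optionally pulse the reset line and run SoC-specific init, then size the
 * FIFO, install the interrupt handler and register the single slot.
 */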
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata))
			return dev_err_probe(host->dev, PTR_ERR(host->pdata),
					     "platform data not available\n");
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
		ret = PTR_ERR(host->biu_clk);
		if (ret == -EPROBE_DEFER)
			return ret;
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		ret = PTR_ERR(host->ciu_clk);
		if (ret == -EPROBE_DEFER)
			goto err_clk_biu;

		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (host->pdata->rstc) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	dw_mci_init_fault(host);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->data_addr_override)
		host->fifo_reg = host->regs + host->data_addr_override;
	else if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	INIT_WORK(&host->bh_work, dw_mci_work_func);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	ret = dw_mci_init_slot(host);
	if (ret) {
		dev_dbg(host->dev, "slot %d init failed\n", i);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
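/*
 * Runtime PM: suspend tears down DMA and gates the card-interface clock
 * (and the bus-interface clock when card detect does not depend on it);
 * resume re-runs the minimal controller init and restores the FIFO
 * threshold, interrupt mask and bus settings.
 */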
#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Re-enable SDIO interrupts. */
	if (sdio_irq_claimed(host->slot->mmc))
		__dw_mci_enable_sdio_irq(host->slot, 1);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");