// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE
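
/*
 * The descriptor ring occupies a single page. With a 4 KiB PAGE_SIZE that
 * is room for 128 of the 32-byte 64-bit descriptors below, or 256 of the
 * 16-byte 32-bit ones; the exact count is computed in dw_mci_idmac_init().
 */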

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy.  Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));
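
	/*
	 * Only two classes of data command can be cleanly aborted: block
	 * read/write (and tuning) commands get a CMD12 STOP_TRANSMISSION,
	 * while an SDIO CMD53 gets a CMD52 write to the CCCR ABORT register
	 * (bit 31 = write, bits 30:28 = function 0 i.e. the CCCR, bits 25:9 =
	 * register address, and the write data names the function to abort,
	 * recovered from bits 30:28 of the original CMD53 argument).
	 */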
	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here.  Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel.  ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs.  ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
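
/*
 * Both descriptor preparation helpers run the ring in chained mode
 * (IDMAC_DES0_CH): each descriptor carries a single buffer plus a link to
 * the next one, so only buffer 1 is ever programmed. Per-descriptor
 * completion interrupts are suppressed with IDMAC_DES0_DIC, and DIC is
 * cleared again on the last descriptor, so one transfer raises a single
 * interrupt.
 */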

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}
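
/*
 * Note on the poll-demand write in dw_mci_idmac_start_dma(): PLDMND acts as
 * a doorbell register - the value written is ignored, and any write wakes
 * the IDMAC from suspend so it (re)fetches the current descriptor.
 */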

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
					    unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}

		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}
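
/*
 * host_cookie tracks who mapped the scatterlist: COOKIE_PRE_MAPPED when
 * dw_mci_pre_req() mapped it ahead of time on the mmc core's async path,
 * COOKIE_MAPPED when dw_mci_pre_dma_transfer() had to map it at submit
 * time, and COOKIE_UNMAPPED otherwise. dw_mci_post_req() unmaps either
 * variant, while dw_mci_dma_cleanup() only unmaps COOKIE_MAPPED buffers,
 * so pre-mapped requests stay mapped until their post_req call.
 */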

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
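
/*
 * Worked example for dw_mci_adjust_fifoth() (hypothetical numbers): with a
 * 128-word FIFO and a 32-bit data width, a 512-byte block gives
 * blksz_depth = 128, tx_wmark = 64 and tx_wmark_invers = 64. The loop
 * settles on mszs[5] = 64, the largest burst dividing both, so FIFOTH is
 * programmed with msize = 5, rx_wmark = 63 and tx_wmark = 64.
 */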

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
		host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}
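
/*
 * The card threshold armed above guards against FIFO under/overrun at the
 * fast timings checked for: with the read threshold set, the controller
 * only starts a block on the bus once at least thld_size bytes of FIFO
 * space (or, for the HS400 write case, of data) are available, so the bus
 * need not be stalled mid-block.
 */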

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set watermark same as data size.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
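
/*
 * CLKDIV divides the controller clock by twice the programmed value (zero
 * means bypass). For example (hypothetical numbers): with bus_hz = 100 MHz
 * and a requested 400 kHz card clock, div works out to 250 and is rounded
 * to the CLKDIV value 125 above, so the card sees
 * 100 MHz / (2 * 125) = 400 kHz exactly.
 */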

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d - %s V\n",
					 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}
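
/*
 * SDIO cards signal interrupts on DAT1, and the card can only drive that
 * line while it is being clocked; if low power mode gated the clock between
 * commands, a card interrupt could be delayed indefinitely. Hence the clock
 * is kept running for SDIO (and SD combo) cards above.
 */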

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}
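
/*
 * Note the reversed indexing for 136-bit responses above: the mmc core
 * expects resp[0] to hold the most significant response word, while the
 * controller places it in RESP3, so the four registers are read back in
 * reverse order.
 */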

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	/* add a bit spare time */
	drto_ms += 10;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
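
/*
 * The software timeout mirrors the hardware one expressed in card clocks.
 * For example (hypothetical numbers): __dw_mci_start_request() programs
 * TMOUT to 0xFFFFFFFF, so drto_clks is 0xffffff; with bus_hz = 100 MHz and
 * CLKDIV = 0 (drto_div clamped to 1) that is
 * DIV_ROUND_UP(16777215 * 1000, 100000000) = 168 ms, so the dto_timer
 * fires roughly 178 ms out - safely after the hardware DRTO would.
 */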
1926 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
1928 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1929 return false;
1932 * Really be certain that the timer has stopped. This is a bit of
1933 * paranoia and could only really happen if we had really bad
1934 * interrupt latency and the interrupt routine and timeout were
1935 * running concurrently so that the del_timer() in the interrupt
1936 * handler couldn't run.
1938 WARN_ON(del_timer_sync(&host->cto_timer));
1939 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1941 return true;
1944 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
1946 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1947 return false;
1949 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
1950 WARN_ON(del_timer_sync(&host->dto_timer));
1951 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1953 return true;
1956 static void dw_mci_tasklet_func(unsigned long priv)
1958 struct dw_mci *host = (struct dw_mci *)priv;
1959 struct mmc_data *data;
1960 struct mmc_command *cmd;
1961 struct mmc_request *mrq;
1962 enum dw_mci_state state;
1963 enum dw_mci_state prev_state;
1964 unsigned int err;
1966 spin_lock(&host->lock);
1968 state = host->state;
1969 data = host->data;
1970 mrq = host->mrq;
1972 do {
1973 prev_state = state;
1975 switch (state) {
1976 case STATE_IDLE:
1977 case STATE_WAITING_CMD11_DONE:
1978 break;
1980 case STATE_SENDING_CMD11:
1981 case STATE_SENDING_CMD:
1982 if (!dw_mci_clear_pending_cmd_complete(host))
1983 break;
1985 cmd = host->cmd;
1986 host->cmd = NULL;
1987 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1988 err = dw_mci_command_complete(host, cmd);
1989 if (cmd == mrq->sbc && !err) {
1990 __dw_mci_start_request(host, host->slot,
1991 mrq->cmd);
1992 goto unlock;
1995 if (cmd->data && err) {
1997 * During UHS tuning sequence, sending the stop
1998 * command after the response CRC error would
1999 * throw the system into a confused state
2000 * causing all future tuning phases to report
2001 * failure.
2003 * In such case controller will move into a data
2004 * transfer state after a response error or
2005 * response CRC error. Let's let that finish
2006 * before trying to send a stop, so we'll go to
2007 * STATE_SENDING_DATA.
2009 * Although letting the data transfer take place
2010 * will waste a bit of time (we already know
2011 * the command was bad), it can't cause any
2012 * errors since it's possible it would have
2013 * taken place anyway if this tasklet got
2014 * delayed. Allowing the transfer to take place
2015 * avoids races and keeps things simple.
2017 if (err != -ETIMEDOUT) {
2018 state = STATE_SENDING_DATA;
2019 continue;
2022 dw_mci_stop_dma(host);
2023 send_stop_abort(host, data);
2024 state = STATE_SENDING_STOP;
2025 break;
2028 if (!cmd->data || err) {
2029 dw_mci_request_end(host, mrq);
2030 goto unlock;
2033 prev_state = state = STATE_SENDING_DATA;
2034 /* fall through */
2036 case STATE_SENDING_DATA:
2038 * We could get a data error and never a transfer
2039 * complete so we'd better check for it here.
2041 * Note that we don't really care if we also got a
2042 * transfer complete; stopping the DMA and sending an
2043 * abort won't hurt.
2045 if (test_and_clear_bit(EVENT_DATA_ERROR,
2046 &host->pending_events)) {
2047 dw_mci_stop_dma(host);
2048 if (!(host->data_status & (SDMMC_INT_DRTO |
2049 SDMMC_INT_EBE)))
2050 send_stop_abort(host, data);
2051 state = STATE_DATA_ERROR;
2052 break;
2055 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2056 &host->pending_events)) {
2058 * If all data-related interrupts don't come
2059 * within the given time in reading data state.
2061 if (host->dir_status == DW_MCI_RECV_STATUS)
2062 dw_mci_set_drto(host);
2063 break;
2066 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2069 * Handle an EVENT_DATA_ERROR that might have shown up
2070 * before the transfer completed. This might not have
2071 * been caught by the check above because the interrupt
2072 * could have gone off between the previous check and
2073 * the check for transfer complete.
2075 * Technically this ought not be needed assuming we
2076 * get a DATA_COMPLETE eventually (we'll notice the
2077 * error and end the request), but it shouldn't hurt.
2079 * This has the advantage of sending the stop command.
2081 if (test_and_clear_bit(EVENT_DATA_ERROR,
2082 &host->pending_events)) {
2083 dw_mci_stop_dma(host);
2084 if (!(host->data_status & (SDMMC_INT_DRTO |
2085 SDMMC_INT_EBE)))
2086 send_stop_abort(host, data);
2087 state = STATE_DATA_ERROR;
2088 break;
2090 prev_state = state = STATE_DATA_BUSY;
2092 /* fall through */
2094 case STATE_DATA_BUSY:
2095 if (!dw_mci_clear_pending_data_complete(host)) {
2096 /*
2097 * If the data error interrupt came but the data
2098 * over interrupt doesn't follow within the given
2099 * time in the reading-data state, arm DRTO.
2100 */
2101 if (host->dir_status == DW_MCI_RECV_STATUS)
2102 dw_mci_set_drto(host);
2103 break;
2104 }
2106 host->data = NULL;
2107 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2108 err = dw_mci_data_complete(host, data);
2110 if (!err) {
2111 if (!data->stop || mrq->sbc) {
2112 if (mrq->sbc && data->stop)
2113 data->stop->error = 0;
2114 dw_mci_request_end(host, mrq);
2115 goto unlock;
2116 }
2118 /* stop command for open-ended transfer */
2119 if (data->stop)
2120 send_stop_abort(host, data);
2121 } else {
2122 /*
2123 * If we don't have a command complete now we'll
2124 * never get one since we just reset everything;
2125 * better end the request.
2126 *
2127 * If we do have a command complete we'll fall
2128 * through to the SENDING_STOP command and
2129 * everything will be peachy keen.
2130 */
2131 if (!test_bit(EVENT_CMD_COMPLETE,
2132 &host->pending_events)) {
2133 host->cmd = NULL;
2134 dw_mci_request_end(host, mrq);
2135 goto unlock;
2136 }
2137 }
2139 /*
2140 * If err is non-zero, the stop/abort command has
2141 * already been issued by this point.
2142 */
2143 prev_state = state = STATE_SENDING_STOP;
2145 /* fall through */
2147 case STATE_SENDING_STOP:
2148 if (!dw_mci_clear_pending_cmd_complete(host))
2149 break;
2151 /* CMD error in data command */
2152 if (mrq->cmd->error && mrq->data)
2153 dw_mci_reset(host);
2155 host->cmd = NULL;
2156 host->data = NULL;
2158 if (!mrq->sbc && mrq->stop)
2159 dw_mci_command_complete(host, mrq->stop);
2160 else
2161 host->cmd_status = 0;
2163 dw_mci_request_end(host, mrq);
2164 goto unlock;
2166 case STATE_DATA_ERROR:
2167 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2168 &host->pending_events))
2169 break;
2171 state = STATE_DATA_BUSY;
2172 break;
2173 }
2174 } while (state != prev_state);
2176 host->state = state;
2177 unlock:
2178 spin_unlock(&host->lock);
2179 }
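The tasklet above is a small state machine: each pass consumes whatever events have fired and either settles in a state (break) or advances and loops (continue / fall through). A minimal stand-alone C model of the happy-path transitions, with hypothetical names and the error, CMD11 and SBC paths deliberately omitted (a sketch, not driver code):

#include <stdio.h>

enum state { IDLE, SENDING_CMD, SENDING_DATA, DATA_BUSY, SENDING_STOP };

/* happy path only: command done -> data done -> busy done -> stop */
static enum state next_state(enum state s, int has_data, int open_ended)
{
	switch (s) {
	case SENDING_CMD:
		return has_data ? SENDING_DATA : IDLE;
	case SENDING_DATA:		/* EVENT_XFER_COMPLETE seen */
		return DATA_BUSY;
	case DATA_BUSY:			/* EVENT_DATA_COMPLETE seen */
		return open_ended ? SENDING_STOP : IDLE;
	default:			/* SENDING_STOP finishes the request */
		return IDLE;
	}
}

int main(void)
{
	enum state s = SENDING_CMD;

	while (s != IDLE) {
		printf("state %d\n", s);
		s = next_state(s, 1, 1);	/* open-ended data command */
	}
	return 0;
}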
2182 /* push final bytes to part_buf, only use during push */
2183 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2184 {
2185 memcpy((void *)&host->part_buf, buf, cnt);
2186 host->part_buf_count = cnt;
2187 }
2189 /* append bytes to part_buf, only use during push */
2190 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2191 {
2192 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2193 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2194 host->part_buf_count += cnt;
2195 return cnt;
2196 }
2198 /* pull first bytes from part_buf, only use during pull */
2199 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2200 {
2201 cnt = min_t(int, cnt, host->part_buf_count);
2202 if (cnt) {
2203 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2204 cnt);
2205 host->part_buf_count -= cnt;
2206 host->part_buf_start += cnt;
2207 }
2208 return cnt;
2209 }
2211 /* pull final bytes from the part_buf, assuming it's just been filled */
2212 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2213 {
2214 memcpy(buf, &host->part_buf, cnt);
2215 host->part_buf_start = cnt;
2216 host->part_buf_count = (1 << host->data_shift) - cnt;
2217 }
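The four helpers above implement a small byte accumulator: part_buf holds the bytes of one partially filled FIFO word, part_buf_count says how many are valid, and part_buf_start is where the next unread byte sits. A stand-alone sketch of the same bookkeeping (hypothetical struct; the driver keeps these fields in struct dw_mci):

#include <stdio.h>
#include <string.h>

struct pbuf {
	unsigned char buf[8];	/* widest FIFO word is 64 bits */
	int start, count;
	int data_shift;		/* FIFO word is 1 << data_shift bytes */
};

/* mirror of dw_mci_push_part_bytes(): append up to one word's worth */
static int push_part(struct pbuf *p, const void *src, int cnt)
{
	int room = (1 << p->data_shift) - p->count;

	if (cnt > room)
		cnt = room;
	memcpy(p->buf + p->count, src, cnt);
	p->count += cnt;
	return cnt;
}

/* mirror of dw_mci_pull_part_bytes(): hand out buffered leading bytes */
static int pull_part(struct pbuf *p, void *dst, int cnt)
{
	if (cnt > p->count)
		cnt = p->count;
	memcpy(dst, p->buf + p->start, cnt);
	p->start += cnt;
	p->count -= cnt;
	return cnt;
}

int main(void)
{
	struct pbuf p = { .data_shift = 2 };	/* 32-bit FIFO */
	char out[4];
	int n;

	n = push_part(&p, "abc", 3);		/* one byte short of a word */
	printf("pushed %d, buffered %d\n", n, p.count);
	n = pull_part(&p, out, sizeof(out));	/* drains the partial word */
	printf("pulled %d\n", n);
	return 0;
}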
2219 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2221 struct mmc_data *data = host->data;
2222 int init_cnt = cnt;
2224 /* try and push anything in the part_buf */
2225 if (unlikely(host->part_buf_count)) {
2226 int len = dw_mci_push_part_bytes(host, buf, cnt);
2228 buf += len;
2229 cnt -= len;
2230 if (host->part_buf_count == 2) {
2231 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2232 host->part_buf_count = 0;
2233 }
2234 }
2235 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2236 if (unlikely((unsigned long)buf & 0x1)) {
2237 while (cnt >= 2) {
2238 u16 aligned_buf[64];
2239 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2240 int items = len >> 1;
2241 int i;
2242 /* memcpy from input buffer into aligned buffer */
2243 memcpy(aligned_buf, buf, len);
2244 buf += len;
2245 cnt -= len;
2246 /* push data from aligned buffer into fifo */
2247 for (i = 0; i < items; ++i)
2248 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2250 } else
2251 #endif
2253 u16 *pdata = buf;
2255 for (; cnt >= 2; cnt -= 2)
2256 mci_fifo_writew(host->fifo_reg, *pdata++);
2257 buf = pdata;
2259 /* put anything remaining in the part_buf */
2260 if (cnt) {
2261 dw_mci_set_part_bytes(host, buf, cnt);
2262 /* Push data if we have reached the expected data length */
2263 if ((data->bytes_xfered + init_cnt) ==
2264 (data->blksz * data->blocks))
2265 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2266 }
2267 }
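dw_mci_push_data16() shows the bounce-buffer idiom shared by all six push/pull variants: when the caller's buffer is misaligned and the architecture cannot do unaligned accesses, the data is memcpy'd (byte-safe) into an aligned scratch array before being written to the FIFO word by word. A self-contained sketch of the 16-bit case with the FIFO simulated (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int words_written;		/* stand-in for the hardware FIFO */
static void fifo_writew(uint16_t w) { (void)w; words_written++; }

static void push16(const void *buf, int cnt)
{
	if ((uintptr_t)buf & 0x1) {	/* misaligned: bounce through scratch */
		while (cnt >= 2) {
			uint16_t aligned[64];
			int len = cnt & -2;
			int i;

			if (len > (int)sizeof(aligned))
				len = sizeof(aligned);
			memcpy(aligned, buf, len);	/* byte copy is always legal */
			for (i = 0; i < len >> 1; i++)
				fifo_writew(aligned[i]);
			buf = (const char *)buf + len;
			cnt -= len;
		}
	} else {			/* aligned: write straight through */
		const uint16_t *p = buf;

		for (; cnt >= 2; cnt -= 2)
			fifo_writew(*p++);
	}
}

int main(void)
{
	uint16_t backing[8] = { 0 };		/* 2-byte-aligned storage */
	char *src = (char *)backing + 1;	/* deliberately odd address */

	memcpy(src, "ABCDEFGH", 8);
	push16(src, 8);
	printf("%d words\n", words_written);	/* prints 4 */
	return 0;
}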
2269 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2271 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2272 if (unlikely((unsigned long)buf & 0x1)) {
2273 while (cnt >= 2) {
2274 /* pull data from fifo into aligned buffer */
2275 u16 aligned_buf[64];
2276 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2277 int items = len >> 1;
2278 int i;
2280 for (i = 0; i < items; ++i)
2281 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2282 /* memcpy from aligned buffer into output buffer */
2283 memcpy(buf, aligned_buf, len);
2284 buf += len;
2285 cnt -= len;
2287 } else
2288 #endif
2290 u16 *pdata = buf;
2292 for (; cnt >= 2; cnt -= 2)
2293 *pdata++ = mci_fifo_readw(host->fifo_reg);
2294 buf = pdata;
2296 if (cnt) {
2297 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2298 dw_mci_pull_final_bytes(host, buf, cnt);
2299 }
2300 }
2302 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2304 struct mmc_data *data = host->data;
2305 int init_cnt = cnt;
2307 /* try and push anything in the part_buf */
2308 if (unlikely(host->part_buf_count)) {
2309 int len = dw_mci_push_part_bytes(host, buf, cnt);
2311 buf += len;
2312 cnt -= len;
2313 if (host->part_buf_count == 4) {
2314 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2315 host->part_buf_count = 0;
2316 }
2317 }
2318 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2319 if (unlikely((unsigned long)buf & 0x3)) {
2320 while (cnt >= 4) {
2321 u32 aligned_buf[32];
2322 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2323 int items = len >> 2;
2324 int i;
2325 /* memcpy from input buffer into aligned buffer */
2326 memcpy(aligned_buf, buf, len);
2327 buf += len;
2328 cnt -= len;
2329 /* push data from aligned buffer into fifo */
2330 for (i = 0; i < items; ++i)
2331 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2333 } else
2334 #endif
2336 u32 *pdata = buf;
2338 for (; cnt >= 4; cnt -= 4)
2339 mci_fifo_writel(host->fifo_reg, *pdata++);
2340 buf = pdata;
2342 /* put anything remaining in the part_buf */
2343 if (cnt) {
2344 dw_mci_set_part_bytes(host, buf, cnt);
2345 /* Push data if we have reached the expected data length */
2346 if ((data->bytes_xfered + init_cnt) ==
2347 (data->blksz * data->blocks))
2348 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2349 }
2350 }
2352 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2354 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2355 if (unlikely((unsigned long)buf & 0x3)) {
2356 while (cnt >= 4) {
2357 /* pull data from fifo into aligned buffer */
2358 u32 aligned_buf[32];
2359 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2360 int items = len >> 2;
2361 int i;
2363 for (i = 0; i < items; ++i)
2364 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2365 /* memcpy from aligned buffer into output buffer */
2366 memcpy(buf, aligned_buf, len);
2367 buf += len;
2368 cnt -= len;
2370 } else
2371 #endif
2373 u32 *pdata = buf;
2375 for (; cnt >= 4; cnt -= 4)
2376 *pdata++ = mci_fifo_readl(host->fifo_reg);
2377 buf = pdata;
2379 if (cnt) {
2380 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2381 dw_mci_pull_final_bytes(host, buf, cnt);
2382 }
2383 }
2385 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2387 struct mmc_data *data = host->data;
2388 int init_cnt = cnt;
2390 /* try and push anything in the part_buf */
2391 if (unlikely(host->part_buf_count)) {
2392 int len = dw_mci_push_part_bytes(host, buf, cnt);
2394 buf += len;
2395 cnt -= len;
2397 if (host->part_buf_count == 8) {
2398 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2399 host->part_buf_count = 0;
2400 }
2401 }
2402 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2403 if (unlikely((unsigned long)buf & 0x7)) {
2404 while (cnt >= 8) {
2405 u64 aligned_buf[16];
2406 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2407 int items = len >> 3;
2408 int i;
2409 /* memcpy from input buffer into aligned buffer */
2410 memcpy(aligned_buf, buf, len);
2411 buf += len;
2412 cnt -= len;
2413 /* push data from aligned buffer into fifo */
2414 for (i = 0; i < items; ++i)
2415 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2417 } else
2418 #endif
2420 u64 *pdata = buf;
2422 for (; cnt >= 8; cnt -= 8)
2423 mci_fifo_writeq(host->fifo_reg, *pdata++);
2424 buf = pdata;
2426 /* put anything remaining in the part_buf */
2427 if (cnt) {
2428 dw_mci_set_part_bytes(host, buf, cnt);
2429 /* Push data if we have reached the expected data length */
2430 if ((data->bytes_xfered + init_cnt) ==
2431 (data->blksz * data->blocks))
2432 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2433 }
2434 }
2436 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2438 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2439 if (unlikely((unsigned long)buf & 0x7)) {
2440 while (cnt >= 8) {
2441 /* pull data from fifo into aligned buffer */
2442 u64 aligned_buf[16];
2443 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2444 int items = len >> 3;
2445 int i;
2447 for (i = 0; i < items; ++i)
2448 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2450 /* memcpy from aligned buffer into output buffer */
2451 memcpy(buf, aligned_buf, len);
2452 buf += len;
2453 cnt -= len;
2455 } else
2456 #endif
2458 u64 *pdata = buf;
2460 for (; cnt >= 8; cnt -= 8)
2461 *pdata++ = mci_fifo_readq(host->fifo_reg);
2462 buf = pdata;
2464 if (cnt) {
2465 host->part_buf = mci_fifo_readq(host->fifo_reg);
2466 dw_mci_pull_final_bytes(host, buf, cnt);
2467 }
2468 }
2470 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2472 int len;
2474 /* get remaining partial bytes */
2475 len = dw_mci_pull_part_bytes(host, buf, cnt);
2476 if (unlikely(len == cnt))
2477 return;
2478 buf += len;
2479 cnt -= len;
2481 /* get the rest of the data */
2482 host->pull_data(host, buf, cnt);
2483 }
2485 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2487 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2488 void *buf;
2489 unsigned int offset;
2490 struct mmc_data *data = host->data;
2491 int shift = host->data_shift;
2492 u32 status;
2493 unsigned int len;
2494 unsigned int remain, fcnt;
2496 do {
2497 if (!sg_miter_next(sg_miter))
2498 goto done;
2500 host->sg = sg_miter->piter.sg;
2501 buf = sg_miter->addr;
2502 remain = sg_miter->length;
2503 offset = 0;
2505 do {
2506 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2507 << shift) + host->part_buf_count;
2508 len = min(remain, fcnt);
2509 if (!len)
2510 break;
2511 dw_mci_pull_data(host, (void *)(buf + offset), len);
2512 data->bytes_xfered += len;
2513 offset += len;
2514 remain -= len;
2515 } while (remain);
2517 sg_miter->consumed = offset;
2518 status = mci_readl(host, MINTSTS);
2519 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2520 /* if the RXDR is ready read again */
2521 } while ((status & SDMMC_INT_RXDR) ||
2522 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2524 if (!remain) {
2525 if (!sg_miter_next(sg_miter))
2526 goto done;
2527 sg_miter->consumed = 0;
2528 }
2529 sg_miter_stop(sg_miter);
2530 return;
2532 done:
2533 sg_miter_stop(sg_miter);
2534 host->sg = NULL;
2535 smp_wmb(); /* drain writebuffer */
2536 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2537 }
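The fcnt computation above is worth a worked example. STATUS reports the FIFO fill level in FIFO words; shifting by data_shift converts words to bytes; and part_buf_count accounts for the bytes already held back in part_buf. The write path (next function) inverts this to compute the remaining room. With assumed numbers:

#include <stdio.h>

int main(void)
{
	int fifo_words = 12;	/* SDMMC_GET_FCNT(STATUS), assumed value */
	int data_shift = 2;	/* 32-bit FIFO: 4 bytes per word */
	int part_buf_count = 3;
	int fifo_depth = 32;	/* assumed value */

	/* read path: bytes available to pull right now */
	int rx = (fifo_words << data_shift) + part_buf_count;

	/* write path: bytes of room left to push */
	int tx = ((fifo_depth - fifo_words) << data_shift) - part_buf_count;

	printf("rx=%d tx=%d\n", rx, tx);	/* rx=51 tx=77 */
	return 0;
}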
2539 static void dw_mci_write_data_pio(struct dw_mci *host)
2541 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2542 void *buf;
2543 unsigned int offset;
2544 struct mmc_data *data = host->data;
2545 int shift = host->data_shift;
2546 u32 status;
2547 unsigned int len;
2548 unsigned int fifo_depth = host->fifo_depth;
2549 unsigned int remain, fcnt;
2551 do {
2552 if (!sg_miter_next(sg_miter))
2553 goto done;
2555 host->sg = sg_miter->piter.sg;
2556 buf = sg_miter->addr;
2557 remain = sg_miter->length;
2558 offset = 0;
2560 do {
2561 fcnt = ((fifo_depth -
2562 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2563 << shift) - host->part_buf_count;
2564 len = min(remain, fcnt);
2565 if (!len)
2566 break;
2567 host->push_data(host, (void *)(buf + offset), len);
2568 data->bytes_xfered += len;
2569 offset += len;
2570 remain -= len;
2571 } while (remain);
2573 sg_miter->consumed = offset;
2574 status = mci_readl(host, MINTSTS);
2575 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2576 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2578 if (!remain) {
2579 if (!sg_miter_next(sg_miter))
2580 goto done;
2581 sg_miter->consumed = 0;
2582 }
2583 sg_miter_stop(sg_miter);
2584 return;
2586 done:
2587 sg_miter_stop(sg_miter);
2588 host->sg = NULL;
2589 smp_wmb(); /* drain writebuffer */
2590 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2591 }
2593 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2595 del_timer(&host->cto_timer);
2597 if (!host->cmd_status)
2598 host->cmd_status = status;
2600 smp_wmb(); /* drain writebuffer */
2602 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2603 tasklet_schedule(&host->tasklet);
2606 static void dw_mci_handle_cd(struct dw_mci *host)
2608 struct dw_mci_slot *slot = host->slot;
2610 if (slot->mmc->ops->card_event)
2611 slot->mmc->ops->card_event(slot->mmc);
2612 mmc_detect_change(slot->mmc,
2613 msecs_to_jiffies(host->pdata->detect_delay_ms));
2616 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2618 struct dw_mci *host = dev_id;
2619 u32 pending;
2620 struct dw_mci_slot *slot = host->slot;
2621 unsigned long irqflags;
2623 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2625 if (pending) {
2626 /* Check volt switch first, since it can look like an error */
2627 if ((host->state == STATE_SENDING_CMD11) &&
2628 (pending & SDMMC_INT_VOLT_SWITCH)) {
2629 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2630 pending &= ~SDMMC_INT_VOLT_SWITCH;
2632 /*
2633 * Hold the lock; we know cmd11_timer can't be kicked
2634 * off after the lock is released, so safe to delete.
2635 */
2636 spin_lock_irqsave(&host->irq_lock, irqflags);
2637 dw_mci_cmd_interrupt(host, pending);
2638 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2640 del_timer(&host->cmd11_timer);
2641 }
2643 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2644 spin_lock_irqsave(&host->irq_lock, irqflags);
2646 del_timer(&host->cto_timer);
2647 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2648 host->cmd_status = pending;
2649 smp_wmb(); /* drain writebuffer */
2650 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2652 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2653 }
2655 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2656 /* if there is an error, report DATA_ERROR */
2657 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2658 host->data_status = pending;
2659 smp_wmb(); /* drain writebuffer */
2660 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2661 tasklet_schedule(&host->tasklet);
2662 }
2664 if (pending & SDMMC_INT_DATA_OVER) {
2665 spin_lock_irqsave(&host->irq_lock, irqflags);
2667 del_timer(&host->dto_timer);
2669 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2670 if (!host->data_status)
2671 host->data_status = pending;
2672 smp_wmb(); /* drain writebuffer */
2673 if (host->dir_status == DW_MCI_RECV_STATUS) {
2674 if (host->sg != NULL)
2675 dw_mci_read_data_pio(host, true);
2676 }
2677 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2678 tasklet_schedule(&host->tasklet);
2680 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2681 }
2683 if (pending & SDMMC_INT_RXDR) {
2684 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2685 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2686 dw_mci_read_data_pio(host, false);
2687 }
2689 if (pending & SDMMC_INT_TXDR) {
2690 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2691 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2692 dw_mci_write_data_pio(host);
2693 }
2695 if (pending & SDMMC_INT_CMD_DONE) {
2696 spin_lock_irqsave(&host->irq_lock, irqflags);
2698 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2699 dw_mci_cmd_interrupt(host, pending);
2701 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2702 }
2704 if (pending & SDMMC_INT_CD) {
2705 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2706 dw_mci_handle_cd(host);
2707 }
2709 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2710 mci_writel(host, RINTSTS,
2711 SDMMC_INT_SDIO(slot->sdio_id));
2712 __dw_mci_enable_sdio_irq(slot, 0);
2713 sdio_signal_irq(slot->mmc);
2714 }
2716 }
2718 if (host->use_dma != TRANS_MODE_IDMAC)
2719 return IRQ_HANDLED;
2721 /* Handle IDMA interrupts */
2722 if (host->dma_64bit_address == 1) {
2723 pending = mci_readl(host, IDSTS64);
2724 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2725 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2726 SDMMC_IDMAC_INT_RI);
2727 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2728 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2729 host->dma_ops->complete((void *)host);
2730 }
2731 } else {
2732 pending = mci_readl(host, IDSTS);
2733 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2734 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2735 SDMMC_IDMAC_INT_RI);
2736 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2737 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2738 host->dma_ops->complete((void *)host);
2739 }
2740 }
2742 return IRQ_HANDLED;
2743 }
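The handler above follows the usual pattern for write-1-to-clear status registers: read the masked status (MINTSTS) once, then acknowledge each cause by writing its bit back to RINTSTS before handling it. A toy model of that flow, with made-up bit positions and the register simulated in a variable (not the real layout):

#include <stdint.h>
#include <stdio.h>

#define INT_CMD_DONE	(1u << 2)	/* hypothetical bit positions */
#define INT_DATA_OVER	(1u << 3)

static uint32_t rintsts = INT_CMD_DONE | INT_DATA_OVER;

int main(void)
{
	uint32_t pending = rintsts;	/* one snapshot, like MINTSTS */

	if (pending & INT_CMD_DONE) {
		rintsts &= ~INT_CMD_DONE;	/* ack: W1C effect simulated */
		printf("command done\n");
	}
	if (pending & INT_DATA_OVER) {
		rintsts &= ~INT_DATA_OVER;
		printf("data over\n");
	}
	return 0;
}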
2745 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2747 struct dw_mci *host = slot->host;
2748 const struct dw_mci_drv_data *drv_data = host->drv_data;
2749 struct mmc_host *mmc = slot->mmc;
2750 int ctrl_id;
2752 if (host->pdata->caps)
2753 mmc->caps = host->pdata->caps;
2755 /*
2756 * Support MMC_CAP_ERASE by default; it is needed for
2757 * trim/discard/erase commands.
2758 */
2759 mmc->caps |= MMC_CAP_ERASE;
2761 if (host->pdata->pm_caps)
2762 mmc->pm_caps = host->pdata->pm_caps;
2764 if (host->dev->of_node) {
2765 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2766 if (ctrl_id < 0)
2767 ctrl_id = 0;
2768 } else {
2769 ctrl_id = to_platform_device(host->dev)->id;
2770 }
2772 if (drv_data && drv_data->caps) {
2773 if (ctrl_id >= drv_data->num_caps) {
2774 dev_err(host->dev, "invalid controller id %d\n",
2775 ctrl_id);
2776 return -EINVAL;
2777 }
2778 mmc->caps |= drv_data->caps[ctrl_id];
2779 }
2781 if (host->pdata->caps2)
2782 mmc->caps2 = host->pdata->caps2;
2784 mmc->f_min = DW_MCI_FREQ_MIN;
2785 if (!mmc->f_max)
2786 mmc->f_max = DW_MCI_FREQ_MAX;
2788 /* Process SDIO IRQs through the sdio_irq_work. */
2789 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2790 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2792 return 0;
2795 static int dw_mci_init_slot(struct dw_mci *host)
2797 struct mmc_host *mmc;
2798 struct dw_mci_slot *slot;
2799 int ret;
2801 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2802 if (!mmc)
2803 return -ENOMEM;
2805 slot = mmc_priv(mmc);
2806 slot->id = 0;
2807 slot->sdio_id = host->sdio_id0 + slot->id;
2808 slot->mmc = mmc;
2809 slot->host = host;
2810 host->slot = slot;
2812 mmc->ops = &dw_mci_ops;
2814 /* if there are external regulators, get them */
2815 ret = mmc_regulator_get_supply(mmc);
2816 if (ret)
2817 goto err_host_allocated;
2819 if (!mmc->ocr_avail)
2820 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2822 ret = mmc_of_parse(mmc);
2823 if (ret)
2824 goto err_host_allocated;
2826 ret = dw_mci_init_slot_caps(slot);
2827 if (ret)
2828 goto err_host_allocated;
2830 /* Useful defaults if platform data is unset. */
2831 if (host->use_dma == TRANS_MODE_IDMAC) {
2832 mmc->max_segs = host->ring_size;
2833 mmc->max_blk_size = 65535;
2834 mmc->max_seg_size = 0x1000;
2835 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2836 mmc->max_blk_count = mmc->max_req_size / 512;
2837 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2838 mmc->max_segs = 64;
2839 mmc->max_blk_size = 65535;
2840 mmc->max_blk_count = 65535;
2841 mmc->max_req_size =
2842 mmc->max_blk_size * mmc->max_blk_count;
2843 mmc->max_seg_size = mmc->max_req_size;
2844 } else {
2845 /* TRANS_MODE_PIO */
2846 mmc->max_segs = 64;
2847 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2848 mmc->max_blk_count = 512;
2849 mmc->max_req_size = mmc->max_blk_size *
2850 mmc->max_blk_count;
2851 mmc->max_seg_size = mmc->max_req_size;
2852 }
2854 dw_mci_get_cd(mmc);
2856 ret = mmc_add_host(mmc);
2857 if (ret)
2858 goto err_host_allocated;
2860 #if defined(CONFIG_DEBUG_FS)
2861 dw_mci_init_debugfs(slot);
2862 #endif
2864 return 0;
2866 err_host_allocated:
2867 mmc_free_host(mmc);
2868 return ret;
2869 }
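The IDMAC limits chosen in dw_mci_init_slot() follow from one chained descriptor moving at most 4 KiB (DW_MCI_DESC_DATA_LENGTH), so the descriptor ring bounds the whole request. A worked example with a hypothetical ring_size:

#include <stdio.h>

int main(void)
{
	int ring_size = 128;		/* assumed descriptor count */
	int max_seg_size = 0x1000;	/* 4 KiB per chained descriptor */
	int max_req_size = max_seg_size * ring_size;

	printf("max request: %d bytes = %d blocks of 512\n",
	       max_req_size, max_req_size / 512);	/* 524288, 1024 */
	return 0;
}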
2871 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2873 /* Debugfs stuff is cleaned up by mmc core */
2874 mmc_remove_host(slot->mmc);
2875 slot->host->slot = NULL;
2876 mmc_free_host(slot->mmc);
2877 }
2879 static void dw_mci_init_dma(struct dw_mci *host)
2881 int addr_config;
2882 struct device *dev = host->dev;
2884 /*
2885 * Check the transfer mode from HCON[17:16], clearing up the
2886 * ambiguous description in the dw_mmc databook:
2887 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
2888 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2889 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2890 * 2b'11: Non DW DMA Interface -> pio only
2891 * Compared to the DesignWare DMA Interface, the Generic DMA
2892 * Interface has a simpler request/acknowledge handshake mechanism;
2893 * both are regarded as external DMA masters by dw_mmc.
2894 */
2895 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
2896 if (host->use_dma == DMA_INTERFACE_IDMA) {
2897 host->use_dma = TRANS_MODE_IDMAC;
2898 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
2899 host->use_dma == DMA_INTERFACE_GDMA) {
2900 host->use_dma = TRANS_MODE_EDMAC;
2901 } else {
2902 goto no_dma;
2903 }
2905 /* Determine which DMA interface to use */
2906 if (host->use_dma == TRANS_MODE_IDMAC) {
2907 /*
2908 * Check the ADDR_CONFIG bit in HCON to find the
2909 * IDMAC address bus width.
2910 */
2911 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
2913 if (addr_config == 1) {
2914 /* host supports IDMAC in 64-bit address mode */
2915 host->dma_64bit_address = 1;
2916 dev_info(host->dev,
2917 "IDMAC supports 64-bit address mode.\n");
2918 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2919 dma_set_coherent_mask(host->dev,
2920 DMA_BIT_MASK(64));
2921 } else {
2922 /* host supports IDMAC in 32-bit address mode */
2923 host->dma_64bit_address = 0;
2924 dev_info(host->dev,
2925 "IDMAC supports 32-bit address mode.\n");
2928 /* Alloc memory for sg translation */
2929 host->sg_cpu = dmam_alloc_coherent(host->dev,
2930 DESC_RING_BUF_SZ,
2931 &host->sg_dma, GFP_KERNEL);
2932 if (!host->sg_cpu) {
2933 dev_err(host->dev,
2934 "%s: could not alloc DMA memory\n",
2935 __func__);
2936 goto no_dma;
2937 }
2939 host->dma_ops = &dw_mci_idmac_ops;
2940 dev_info(host->dev, "Using internal DMA controller.\n");
2941 } else {
2942 /* TRANS_MODE_EDMAC: check dma bindings again */
2943 if ((device_property_read_string_array(dev, "dma-names",
2944 NULL, 0) < 0) ||
2945 !device_property_present(dev, "dmas")) {
2946 goto no_dma;
2947 }
2948 host->dma_ops = &dw_mci_edmac_ops;
2949 dev_info(host->dev, "Using external DMA controller.\n");
2950 }
2952 if (host->dma_ops->init && host->dma_ops->start &&
2953 host->dma_ops->stop && host->dma_ops->cleanup) {
2954 if (host->dma_ops->init(host)) {
2955 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2956 __func__);
2957 goto no_dma;
2958 }
2959 } else {
2960 dev_err(host->dev, "DMA initialization not found.\n");
2961 goto no_dma;
2962 }
2964 return;
2966 no_dma:
2967 dev_info(host->dev, "Using PIO mode.\n");
2968 host->use_dma = TRANS_MODE_PIO;
2969 }
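Decoding HCON[17:16] as the comment above describes is a plain two-bit field extraction; the driver's SDMMC_GET_TRANS_MODE() macro does the equivalent. A sketch with a made-up register value (field position taken from the comment):

#include <stdint.h>
#include <stdio.h>

#define GET_TRANS_MODE(x)	(((x) >> 16) & 0x3)

int main(void)
{
	uint32_t hcon = 0x00010000;	/* hypothetical HCON readout */
	static const char * const mode[] = {
		"internal DMA (IDMAC)",		/* 2b'00 */
		"DesignWare DMA (external)",	/* 2b'01 */
		"generic DMA (external)",	/* 2b'10 */
		"no DMA, PIO only",		/* 2b'11 */
	};

	printf("%s\n", mode[GET_TRANS_MODE(hcon)]);
	return 0;
}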
2971 static void dw_mci_cmd11_timer(struct timer_list *t)
2973 struct dw_mci *host = from_timer(host, t, cmd11_timer);
2975 if (host->state != STATE_SENDING_CMD11) {
2976 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
2977 return;
2978 }
2980 host->cmd_status = SDMMC_INT_RTO;
2981 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2982 tasklet_schedule(&host->tasklet);
2983 }
2985 static void dw_mci_cto_timer(struct timer_list *t)
2987 struct dw_mci *host = from_timer(host, t, cto_timer);
2988 unsigned long irqflags;
2989 u32 pending;
2991 spin_lock_irqsave(&host->irq_lock, irqflags);
2993 /*
2994 * If somehow we have very bad interrupt latency it's remotely possible
2995 * that the timer could fire while the interrupt is still pending or
2996 * while the interrupt is midway through running. Let's be paranoid
2997 * and detect those two cases. Note that this paranoia is somewhat
2998 * justified because in this function we don't actually cancel the
2999 * pending command in the controller--we just assume it will never come.
3000 */
3001 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3002 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3003 /* The interrupt should fire; no need to act but we can warn */
3004 dev_warn(host->dev, "Unexpected interrupt latency\n");
3005 goto exit;
3006 }
3007 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3008 /* Presumably interrupt handler couldn't delete the timer */
3009 dev_warn(host->dev, "CTO timeout when already completed\n");
3010 goto exit;
3011 }
3013 /*
3014 * Continued paranoia to make sure we're in the state we expect.
3015 * This paranoia isn't really justified but it seems good to be safe.
3016 */
3017 switch (host->state) {
3018 case STATE_SENDING_CMD11:
3019 case STATE_SENDING_CMD:
3020 case STATE_SENDING_STOP:
3021 /*
3022 * If the CMD_DONE interrupt does NOT come in the
3023 * sending-command state, notify the driver to terminate the
3024 * current transfer and report a command timeout to the core.
3025 */
3026 host->cmd_status = SDMMC_INT_RTO;
3027 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3028 tasklet_schedule(&host->tasklet);
3029 break;
3030 default:
3031 dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3032 host->state);
3033 break;
3034 }
3036 exit:
3037 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3038 }
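The ordering checks in dw_mci_cto_timer() generalize: a software timeout handler must yield if the real completion interrupt is still pending or has already been claimed, because it cannot cancel the command in the controller. A stand-alone model of just that decision, with all state simulated (a sketch, not driver code):

#include <stdio.h>

static int irq_pending;		/* analogue of the MINTSTS check */
static int event_complete;	/* analogue of the pending_events bit */

static void timeout_handler(void)
{
	if (irq_pending) {	/* interrupt will still fire: let it win */
		printf("latency detected, ignoring timeout\n");
		return;
	}
	if (event_complete) {	/* interrupt already ran: stale timer */
		printf("already completed, ignoring timeout\n");
		return;
	}
	printf("genuine timeout, reporting error\n");
}

int main(void)
{
	timeout_handler();	/* genuine timeout */
	irq_pending = 1;
	timeout_handler();	/* racing interrupt */
	return 0;
}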
3040 static void dw_mci_dto_timer(struct timer_list *t)
3042 struct dw_mci *host = from_timer(host, t, dto_timer);
3043 unsigned long irqflags;
3044 u32 pending;
3046 spin_lock_irqsave(&host->irq_lock, irqflags);
3048 /*
3049 * The DTO timer is much longer than the CTO timer, so it's even less
3050 * likely that we'll hit these cases, but it pays to be paranoid.
3051 */
3052 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3053 if (pending & SDMMC_INT_DATA_OVER) {
3054 /* The interrupt should fire; no need to act but we can warn */
3055 dev_warn(host->dev, "Unexpected data interrupt latency\n");
3056 goto exit;
3057 }
3058 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3059 /* Presumably interrupt handler couldn't delete the timer */
3060 dev_warn(host->dev, "DTO timeout when already completed\n");
3061 goto exit;
3062 }
3064 /*
3065 * Continued paranoia to make sure we're in the state we expect.
3066 * This paranoia isn't really justified but it seems good to be safe.
3067 */
3068 switch (host->state) {
3069 case STATE_SENDING_DATA:
3070 case STATE_DATA_BUSY:
3071 /*
3072 * If the DTO interrupt does NOT come in the sending-data state,
3073 * notify the driver to terminate the current transfer and
3074 * report a data timeout to the core.
3075 */
3076 host->data_status = SDMMC_INT_DRTO;
3077 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3078 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3079 tasklet_schedule(&host->tasklet);
3080 break;
3081 default:
3082 dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3083 host->state);
3084 break;
3085 }
3087 exit:
3088 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3089 }
3091 #ifdef CONFIG_OF
3092 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3094 struct dw_mci_board *pdata;
3095 struct device *dev = host->dev;
3096 const struct dw_mci_drv_data *drv_data = host->drv_data;
3097 int ret;
3098 u32 clock_frequency;
3100 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3101 if (!pdata)
3102 return ERR_PTR(-ENOMEM);
3104 /* find the reset controller if one exists */
3105 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3106 if (IS_ERR(pdata->rstc)) {
3107 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
3108 return ERR_PTR(-EPROBE_DEFER);
3109 }
3111 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3112 dev_info(dev,
3113 "fifo-depth property not found, using value of FIFOTH register as default\n");
3115 device_property_read_u32(dev, "card-detect-delay",
3116 &pdata->detect_delay_ms);
3118 device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3120 if (device_property_present(dev, "fifo-watermark-aligned"))
3121 host->wm_aligned = true;
3123 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3124 pdata->bus_hz = clock_frequency;
3126 if (drv_data && drv_data->parse_dt) {
3127 ret = drv_data->parse_dt(host);
3128 if (ret)
3129 return ERR_PTR(ret);
3132 return pdata;
3133 }
3135 #else /* CONFIG_OF */
3136 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3137 {
3138 return ERR_PTR(-EINVAL);
3139 }
3140 #endif /* CONFIG_OF */
3142 static void dw_mci_enable_cd(struct dw_mci *host)
3144 unsigned long irqflags;
3145 u32 temp;
3147 /*
3148 * No need for the CD interrupt when the slot polls for the
3149 * card (MMC_CAP_NEEDS_POLL) or has a working CD GPIO (below).
3150 */
3151 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3152 return;
3154 if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3155 spin_lock_irqsave(&host->irq_lock, irqflags);
3156 temp = mci_readl(host, INTMASK);
3157 temp |= SDMMC_INT_CD;
3158 mci_writel(host, INTMASK, temp);
3159 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3160 }
3161 }
3163 int dw_mci_probe(struct dw_mci *host)
3165 const struct dw_mci_drv_data *drv_data = host->drv_data;
3166 int width, i, ret = 0;
3167 u32 fifo_size;
3169 if (!host->pdata) {
3170 host->pdata = dw_mci_parse_dt(host);
3171 if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
3172 return -EPROBE_DEFER;
3173 } else if (IS_ERR(host->pdata)) {
3174 dev_err(host->dev, "platform data not available\n");
3175 return -EINVAL;
3176 }
3177 }
3179 host->biu_clk = devm_clk_get(host->dev, "biu");
3180 if (IS_ERR(host->biu_clk)) {
3181 dev_dbg(host->dev, "biu clock not available\n");
3182 } else {
3183 ret = clk_prepare_enable(host->biu_clk);
3184 if (ret) {
3185 dev_err(host->dev, "failed to enable biu clock\n");
3186 return ret;
3187 }
3188 }
3190 host->ciu_clk = devm_clk_get(host->dev, "ciu");
3191 if (IS_ERR(host->ciu_clk)) {
3192 dev_dbg(host->dev, "ciu clock not available\n");
3193 host->bus_hz = host->pdata->bus_hz;
3194 } else {
3195 ret = clk_prepare_enable(host->ciu_clk);
3196 if (ret) {
3197 dev_err(host->dev, "failed to enable ciu clock\n");
3198 goto err_clk_biu;
3199 }
3201 if (host->pdata->bus_hz) {
3202 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3203 if (ret)
3204 dev_warn(host->dev,
3205 "Unable to set bus rate to %uHz\n",
3206 host->pdata->bus_hz);
3207 }
3208 host->bus_hz = clk_get_rate(host->ciu_clk);
3209 }
3211 if (!host->bus_hz) {
3212 dev_err(host->dev,
3213 "Platform data must supply bus speed\n");
3214 ret = -ENODEV;
3215 goto err_clk_ciu;
3216 }
3218 if (!IS_ERR(host->pdata->rstc)) {
3219 reset_control_assert(host->pdata->rstc);
3220 usleep_range(10, 50);
3221 reset_control_deassert(host->pdata->rstc);
3222 }
3224 if (drv_data && drv_data->init) {
3225 ret = drv_data->init(host);
3226 if (ret) {
3227 dev_err(host->dev,
3228 "implementation specific init failed\n");
3229 goto err_clk_ciu;
3230 }
3231 }
3233 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3234 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3235 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3237 spin_lock_init(&host->lock);
3238 spin_lock_init(&host->irq_lock);
3239 INIT_LIST_HEAD(&host->queue);
3241 /*
3242 * Get the host data width - this assumes that HCON has been set with
3243 * the correct values.
3244 */
3245 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3246 if (!i) {
3247 host->push_data = dw_mci_push_data16;
3248 host->pull_data = dw_mci_pull_data16;
3249 width = 16;
3250 host->data_shift = 1;
3251 } else if (i == 2) {
3252 host->push_data = dw_mci_push_data64;
3253 host->pull_data = dw_mci_pull_data64;
3254 width = 64;
3255 host->data_shift = 3;
3256 } else {
3257 /* Check for a reserved value, and warn if it is */
3258 WARN((i != 1),
3259 "HCON reports a reserved host data width!\n"
3260 "Defaulting to 32-bit access.\n");
3261 host->push_data = dw_mci_push_data32;
3262 host->pull_data = dw_mci_pull_data32;
3263 width = 32;
3264 host->data_shift = 2;
3265 }
3267 /* Reset all blocks */
3268 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3269 ret = -ENODEV;
3270 goto err_clk_ciu;
3271 }
3273 host->dma_ops = host->pdata->dma_ops;
3274 dw_mci_init_dma(host);
3276 /* Clear the interrupts for the host controller */
3277 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3278 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3280 /* Put in max timeout */
3281 mci_writel(host, TMOUT, 0xFFFFFFFF);
3283 /*
3284 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
3285 * TxMark = fifo_size / 2, DMA multiple-transaction size = 8.
3286 */
3287 if (!host->pdata->fifo_depth) {
3288 /*
3289 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3290 * have been overwritten by the bootloader, just like we're
3291 * about to do, so if you know the value for your hardware, you
3292 * should put it in the platform data.
3293 */
3294 fifo_size = mci_readl(host, FIFOTH);
3295 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3296 } else {
3297 fifo_size = host->pdata->fifo_depth;
3298 }
3299 host->fifo_depth = fifo_size;
3300 host->fifoth_val =
3301 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3302 mci_writel(host, FIFOTH, host->fifoth_val);
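The FIFOTH arithmetic above, worked through with a hypothetical power-on value. Per the decode in the code, the FIFO depth is the reset RX watermark plus one, and the packing (mirroring the driver's SDMMC_SET_FIFOTH()) puts the DMA multiple-transaction size in bits [30:28], the RX watermark in [27:16] and the TX watermark in [11:0]:

#include <stdint.h>
#include <stdio.h>

#define SET_FIFOTH(m, r, t) \
	((((m) & 0x7) << 28) | (((r) & 0xfff) << 16) | ((t) & 0xfff))

int main(void)
{
	uint32_t fifoth = 0x001f0000;	/* assumed power-on FIFOTH readout */
	unsigned int depth = 1 + ((fifoth >> 16) & 0xfff);	/* = 32 */
	uint32_t val = SET_FIFOTH(0x2, depth / 2 - 1, depth / 2);

	printf("depth=%u FIFOTH=0x%08lx\n", depth,
	       (unsigned long)val);	/* depth=32, 0x200f0010 */
	return 0;
}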
3304 /* disable clock to CIU */
3305 mci_writel(host, CLKENA, 0);
3306 mci_writel(host, CLKSRC, 0);
3308 /*
3309 * In the 2.40a spec the data offset changed, so check the
3310 * version ID and set the data offset for the DATA register.
3311 */
3312 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3313 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3315 if (host->data_addr_override)
3316 host->fifo_reg = host->regs + host->data_addr_override;
3317 else if (host->verid < DW_MMC_240A)
3318 host->fifo_reg = host->regs + DATA_OFFSET;
3319 else
3320 host->fifo_reg = host->regs + DATA_240A_OFFSET;
3322 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3323 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3324 host->irq_flags, "dw-mci", host);
3325 if (ret)
3326 goto err_dmaunmap;
3328 /*
3329 * Enable interrupts for command done, data over, data empty, receive
3330 * ready, and errors such as transmit/receive timeout and CRC error.
3331 */
3332 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3333 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3334 DW_MCI_ERROR_FLAGS);
3335 /* Enable mci interrupt */
3336 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3338 dev_info(host->dev,
3339 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
3340 host->irq, width, fifo_size);
3342 /* We need at least one slot to succeed */
3343 ret = dw_mci_init_slot(host);
3344 if (ret) {
3345 dev_dbg(host->dev, "slot 0 init failed\n");
3346 goto err_dmaunmap;
3347 }
3349 /* Now that slots are all setup, we can enable card detect */
3350 dw_mci_enable_cd(host);
3352 return 0;
3354 err_dmaunmap:
3355 if (host->use_dma && host->dma_ops->exit)
3356 host->dma_ops->exit(host);
3358 if (!IS_ERR(host->pdata->rstc))
3359 reset_control_assert(host->pdata->rstc);
3361 err_clk_ciu:
3362 clk_disable_unprepare(host->ciu_clk);
3364 err_clk_biu:
3365 clk_disable_unprepare(host->biu_clk);
3367 return ret;
3368 }
3369 EXPORT_SYMBOL(dw_mci_probe);
3371 void dw_mci_remove(struct dw_mci *host)
3373 dev_dbg(host->dev, "remove slot\n");
3374 if (host->slot)
3375 dw_mci_cleanup_slot(host->slot);
3377 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3378 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3380 /* disable clock to CIU */
3381 mci_writel(host, CLKENA, 0);
3382 mci_writel(host, CLKSRC, 0);
3384 if (host->use_dma && host->dma_ops->exit)
3385 host->dma_ops->exit(host);
3387 if (!IS_ERR(host->pdata->rstc))
3388 reset_control_assert(host->pdata->rstc);
3390 clk_disable_unprepare(host->ciu_clk);
3391 clk_disable_unprepare(host->biu_clk);
3392 }
3393 EXPORT_SYMBOL(dw_mci_remove);
3397 #ifdef CONFIG_PM
3398 int dw_mci_runtime_suspend(struct device *dev)
3400 struct dw_mci *host = dev_get_drvdata(dev);
3402 if (host->use_dma && host->dma_ops->exit)
3403 host->dma_ops->exit(host);
3405 clk_disable_unprepare(host->ciu_clk);
3407 if (host->slot &&
3408 (mmc_can_gpio_cd(host->slot->mmc) ||
3409 !mmc_card_is_removable(host->slot->mmc)))
3410 clk_disable_unprepare(host->biu_clk);
3412 return 0;
3413 }
3414 EXPORT_SYMBOL(dw_mci_runtime_suspend);
3416 int dw_mci_runtime_resume(struct device *dev)
3418 int ret = 0;
3419 struct dw_mci *host = dev_get_drvdata(dev);
3421 if (host->slot &&
3422 (mmc_can_gpio_cd(host->slot->mmc) ||
3423 !mmc_card_is_removable(host->slot->mmc))) {
3424 ret = clk_prepare_enable(host->biu_clk);
3425 if (ret)
3426 return ret;
3427 }
3429 ret = clk_prepare_enable(host->ciu_clk);
3430 if (ret)
3431 goto err;
3433 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3434 clk_disable_unprepare(host->ciu_clk);
3435 ret = -ENODEV;
3436 goto err;
3439 if (host->use_dma && host->dma_ops->init)
3440 host->dma_ops->init(host);
3443 * Restore the initial value at FIFOTH register
3444 * And Invalidate the prev_blksz with zero
3446 mci_writel(host, FIFOTH, host->fifoth_val);
3447 host->prev_blksz = 0;
3449 /* Put in max timeout */
3450 mci_writel(host, TMOUT, 0xFFFFFFFF);
3452 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3453 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3454 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3455 DW_MCI_ERROR_FLAGS);
3456 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3459 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3460 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3462 /* Force setup bus to guarantee available clock output */
3463 dw_mci_setup_bus(host->slot, true);
3465 /* Re-enable SDIO interrupts. */
3466 if (sdio_irq_claimed(host->slot->mmc))
3467 __dw_mci_enable_sdio_irq(host->slot, 1);
3469 /* Now that slots are all setup, we can enable card detect */
3470 dw_mci_enable_cd(host);
3472 return 0;
3474 err:
3475 if (host->slot &&
3476 (mmc_can_gpio_cd(host->slot->mmc) ||
3477 !mmc_card_is_removable(host->slot->mmc)))
3478 clk_disable_unprepare(host->biu_clk);
3480 return ret;
3481 }
3482 EXPORT_SYMBOL(dw_mci_runtime_resume);
3483 #endif /* CONFIG_PM */
3485 static int __init dw_mci_init(void)
3486 {
3487 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3488 return 0;
3489 }
3491 static void __exit dw_mci_exit(void)
3492 {
3493 }
3495 module_init(dw_mci_init);
3496 module_exit(dw_mci_exit);
3498 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3499 MODULE_AUTHOR("NXP Semiconductor VietNam");
3500 MODULE_AUTHOR("Imagination Technologies Ltd");
3501 MODULE_LICENSE("GPL v2");