/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

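/*
 * A note on the SET_BUFFER1_SIZE macros above: the transfer length is
 * packed into bits [12:0] of the buffer-size descriptor word, so one
 * descriptor covers at most 0x1fff (8191) bytes.  A typical 512-byte
 * block, for example, leaves 0x200 in those bits, while the buffer-2
 * size field in bits [25:13] is preserved by the 0x03ffe000 mask.
 */
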
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

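/*
 * For illustration, a multi-block read (MMC_READ_MULTIPLE_BLOCK, R1
 * response) ends up here with roughly:
 *
 *	cmdr = opcode | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP;
 *
 * i.e. wait out any previous data phase, expect a short CRC-checked
 * response, and expect a data transfer.
 */
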
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

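/*
 * The SDIO abort built above follows the CMD52 argument layout: bit 31
 * selects a write, bits [30:28] address function 0, bits [25:9] carry
 * the CCCR ABORT register (SDIO_CCCR_ABORT), and the data bits hold the
 * function number of the transfer being aborted, recovered from bits
 * [30:28] of the original CMD53 argument.
 */
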
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) *
				sizeof(struct idmac_desc_64addr);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
					IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des2 = mem_addr;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;
	}

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
								i++, p++)
			p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
								(i + 1));

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = host->sg_dma;
		p->des0 = IDMAC_DES0_ER;
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

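/*
 * Concretely: a short SDIO access (blocks * blksz below the 16-byte
 * DW_MCI_DMA_THRESHOLD) is rejected here and serviced by PIO, while a
 * word-aligned 512-byte block transfer gets mapped and handed to the
 * DMA engine.
 */
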
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

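/*
 * Worked example (illustrative numbers; the real ones come from the
 * controller configuration): with a 32-bit FIFO (data_shift == 2),
 * fifo_depth == 64 and blksz == 512, we get blksz_depth = 128,
 * tx_wmark = 32 and tx_wmark_invers = 32.  The scan above stops at
 * mszs[5] == 32, which divides both, so msize = 5 (bursts of 32
 * transfers) and rx_wmark = 31.
 */
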
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->sdio_id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(slot->host->dev,
					"failed to enable vqmmc regulator\n");
			else
				slot->host->vqmmc_enabled = true;
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			slot->host->vqmmc_enabled = false;
		}

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d: %d - %d\n",
				ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

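/*
 * For reference, with the ranges used above: a 3.3 V request asks vqmmc
 * for 2.7-3.6 V and clears the per-slot SDMMC_UHS_18V bit, while the
 * 1.8 V switch asks for 1.7-1.95 V and sets it, so hosts that honour
 * UHS_REG and hosts that only obey the regulator both land on the
 * right level.
 */
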
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
	    (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle.  According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->sdio_id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->sdio_id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

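/*
 * State-machine summary (read off the switch above): a successful data
 * command walks STATE_SENDING_CMD -> STATE_SENDING_DATA ->
 * STATE_DATA_BUSY and then either finishes the request or passes
 * through STATE_SENDING_STOP.  Errors detour via STATE_DATA_ERROR,
 * which waits for the transfer to wind down before rejoining at
 * STATE_DATA_BUSY.
 */
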
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

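/*
 * part_buf example: with a 32-bit FIFO (data_shift == 2), pushing 6
 * bytes writes one full word to the FIFO and parks the 2 leftover bytes
 * in part_buf; they are flushed either when 2 more bytes arrive or, at
 * the very end of a transfer, by the push helpers below.
 */
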
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

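/*
 * host->push_data/pull_data are expected to point at the 16/32/64-bit
 * variants above, matching the FIFO width the controller reports
 * (data_shift = 1, 2 or 3 respectively, set up in the probe path
 * outside this excerpt), so "1 << host->data_shift" is always the FIFO
 * word size in bytes.
 */
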
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

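/*
 * FIFO accounting in the loop above: SDMMC_GET_FCNT() reports the words
 * currently in the FIFO, so with a 32-bit FIFO holding 16 words,
 * fcnt = (16 << 2) + part_buf_count bytes can be drained before the
 * status register needs to be consulted again.
 */
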
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	}
#endif

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
			dev_warn(dev, "Slot quirk %s is deprecated\n",
					of_slot_quirks[idx].quirk);
			quirks |= of_slot_quirks[idx].id;
		}

	return quirks;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
#endif /* CONFIG_OF */

static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}


static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;

	/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
	addr_config = (mci_readl(host, HCON) >> 27) & 0x01;

	if (addr_config == 1) {
		/* host supports IDMAC in 64-bit address mode */
		host->dma_64bit_address = 1;
		dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
		if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
			dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
	} else {
		/* host supports IDMAC in 32-bit address mode */
		host->dma_64bit_address = 0;
		dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
	}

	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
}

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence we set
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};

static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
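
/*
 * Illustrative device-tree fragment for the controller-level
 * properties parsed above (all values are placeholders, not
 * recommendations):
 *
 *	mshc0: mshc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *	};
 */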

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots < 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}
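
	/*
	 * Note: the mapping above means the FIFO word is 16 << i bits
	 * wide, and data_shift is log2 of the word size in bytes, so a
	 * byte count converts to a FIFO word count via
	 * "len >> data_shift".
	 */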

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          TxMark  = fifo_size / 2, DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
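
	/*
	 * Worked example, assuming a FIFO depth of 32: RX_WMark =
	 * 32/2 - 1 = 15 and TX_WMark = 32/2 = 16, so requests fire when
	 * the FIFO is roughly half full on receive or half empty on
	 * transmit; the MSIZE value 0x2 selects bursts of 8 transfers.
	 */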

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In the 2.40a spec the data offset changed, so check the
	 * version ID and set the offset of the DATA register accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;
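
	/*
	 * Example: a hypothetical VERID read of 0x5342240a decodes to
	 * verid 0x240a, i.e. a 2.40a core, which selects the shifted
	 * DATA_240A_OFFSET register map.
	 */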

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
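
	/*
	 * HCON[5:1] holds the number of card slots minus one, hence the
	 * "+ 1" above; e.g. a field value of 0 yields num_slots = 1.
	 */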

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * card detect, receive ready, and errors such as transmit and
	 * receive timeouts and CRC errors.
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_dmaunmap;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");