/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
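/*
 * Transfers shorter than DW_MCI_DMA_THRESHOLD bytes (blocks * block size)
 * are done in PIO rather than DMA; see dw_mci_pre_dma_transfer() below.
 */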
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
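/*
 * The IDMAC is used in chained descriptor mode: each 16-byte descriptor
 * carries control bits in des0, the buffer size in des1 and the buffer
 * address in des2, while (because IDMAC_DES0_CH is set) des3 holds the
 * address of the next descriptor. dw_mci_idmac_init() below links the
 * descriptors into a ring.
 */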
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
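/*
 * Build a stop/abort command matching the transfer being ended: CMD12
 * (STOP_TRANSMISSION) for MMC/SD block transfers, or, for an SDIO CMD53,
 * a CMD52 write to the CCCR ABORT register whose data byte names the
 * function to abort (taken from bits 30:28 of the CMD53 argument).
 */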
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg, data->sg_len,
				     dw_mci_get_dma_dir(data));
}
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
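/*
 * The descriptor ring lives in the page allocated in dw_mci_init_dma().
 * With 4 KiB pages and 16-byte descriptors that is 4096 / 16 = 256
 * descriptors; the last one gets the end-of-ring (ER) bit and its des3
 * chains back to the ring base.
 */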
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);

	return 0;
}
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init		= dw_mci_idmac_init,
	.start		= dw_mci_idmac_start_dma,
	.stop		= dw_mci_idmac_stop_dma,
	.complete	= dw_mci_idmac_complete_dma,
	.cleanup	= dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg, data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
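/*
 * data->host_cookie convention: dw_mci_pre_req() maps the scatterlist
 * ahead of time and stores the mapped entry count as the cookie;
 * dw_mci_pre_dma_transfer() then reuses that mapping, and
 * dw_mci_post_req() unmaps and clears the cookie.
 */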
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg, data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
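/*
 * Worked example for the FIFOTH calculation below (values assumed for
 * illustration): with a 32-word FIFO (fifo_depth = 32), a 32-bit FIFO
 * port (data_shift = 2) and blksz = 512, blksz_depth = 128 and
 * tx_wmark = tx_wmark_invers = 16. The largest burst size dividing both
 * is mszs[3] = 16, giving msize = 3 and rx_wmark = 15.
 */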
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
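/*
 * The card read threshold (CDTHRCTL) only matters for the fast timings
 * checked below (HS200/SDR104): it makes the card start a block read
 * only when the FIFO has room for at least thld_size bytes, so the FIFO
 * cannot overflow mid-block at high card clock rates.
 */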
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
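/*
 * Divider example for dw_mci_setup_bus() below (illustrative numbers):
 * bus_hz = 100 MHz and a requested clock of 400 kHz give
 * div = 100000000 / 400000 = 250, then DIV_ROUND_UP(250, 2) = 125 is
 * programmed into CLKDIV, and the card sees bus_hz / (2 * 125) = 400 kHz.
 * A CLKDIV value of 0 passes bus_hz through undivided.
 */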
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock divisor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(&mmc->class_dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
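/*
 * part_buf holds the bytes left over when a scatterlist segment is not
 * a multiple of the FIFO port width (2, 4 or 8 bytes, per data_shift).
 * The push helpers complete and flush it before streaming whole words;
 * the pull helpers drain it before reading the FIFO again.
 */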
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}
/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif
			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;

	return quirks;
}
/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	u32 bus_wd = 1;

	if (!np)
		return 1;

	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming width"
			" as 1\n");
	return bus_wd;
}
/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return -EINVAL;

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return -EINVAL;

	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
		gpio = -EINVAL;
	}

	return gpio;
}
/* find the cd gpio for a given slot */
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
				  struct mmc_host *mmc)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int gpio;

	if (!np)
		return;

	gpio = of_get_named_gpio(np, "cd-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))
		return;

	if (mmc_gpio_request_cd(mmc, gpio, 0))
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	return -EINVAL;
}
static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
				  struct mmc_host *mmc)
{
	return;
}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_setup_bus;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
static inline bool dw_mci_fifo_reset(struct dw_mci *host)
{
	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
}
*host
)
2342 return dw_mci_ctrl_reset(host
,
2343 SDMMC_CTRL_FIFO_RESET
|
2345 SDMMC_CTRL_DMA_RESET
);
2349 static struct dw_mci_of_quirks
{
2354 .quirk
= "broken-cd",
2355 .id
= DW_MCI_QUIRK_BROKEN_CARD_DETECTION
,
2359 static struct dw_mci_board
*dw_mci_parse_dt(struct dw_mci
*host
)
2361 struct dw_mci_board
*pdata
;
2362 struct device
*dev
= host
->dev
;
2363 struct device_node
*np
= dev
->of_node
;
2364 const struct dw_mci_drv_data
*drv_data
= host
->drv_data
;
2366 u32 clock_frequency
;
2368 pdata
= devm_kzalloc(dev
, sizeof(*pdata
), GFP_KERNEL
);
2370 dev_err(dev
, "could not allocate memory for pdata\n");
2371 return ERR_PTR(-ENOMEM
);
2374 /* find out number of slots supported */
2375 if (of_property_read_u32(dev
->of_node
, "num-slots",
2376 &pdata
->num_slots
)) {
2377 dev_info(dev
, "num-slots property not found, "
2378 "assuming 1 slot is available\n");
2379 pdata
->num_slots
= 1;
2383 for (idx
= 0; idx
< ARRAY_SIZE(of_quirks
); idx
++)
2384 if (of_get_property(np
, of_quirks
[idx
].quirk
, NULL
))
2385 pdata
->quirks
|= of_quirks
[idx
].id
;
2387 if (of_property_read_u32(np
, "fifo-depth", &pdata
->fifo_depth
))
2388 dev_info(dev
, "fifo-depth property not found, using "
2389 "value of FIFOTH register as default\n");
2391 of_property_read_u32(np
, "card-detect-delay", &pdata
->detect_delay_ms
);
2393 if (!of_property_read_u32(np
, "clock-frequency", &clock_frequency
))
2394 pdata
->bus_hz
= clock_frequency
;
2396 if (drv_data
&& drv_data
->parse_dt
) {
2397 ret
= drv_data
->parse_dt(host
);
2399 return ERR_PTR(ret
);
2402 if (of_find_property(np
, "keep-power-in-suspend", NULL
))
2403 pdata
->pm_caps
|= MMC_PM_KEEP_POWER
;
2405 if (of_find_property(np
, "enable-sdio-wakeup", NULL
))
2406 pdata
->pm_caps
|= MMC_PM_WAKE_SDIO_IRQ
;
2408 if (of_find_property(np
, "supports-highspeed", NULL
))
2409 pdata
->caps
|= MMC_CAP_SD_HIGHSPEED
| MMC_CAP_MMC_HIGHSPEED
;
2411 if (of_find_property(np
, "caps2-mmc-hs200-1_8v", NULL
))
2412 pdata
->caps2
|= MMC_CAP2_HS200_1_8V_SDR
;
2414 if (of_find_property(np
, "caps2-mmc-hs200-1_2v", NULL
))
2415 pdata
->caps2
|= MMC_CAP2_HS200_1_2V_SDR
;
2417 if (of_get_property(np
, "cd-inverted", NULL
))
2418 pdata
->caps2
|= MMC_CAP2_CD_ACTIVE_HIGH
;
2423 #else /* CONFIG_OF */
2424 static struct dw_mci_board
*dw_mci_parse_dt(struct dw_mci
*host
)
2426 return ERR_PTR(-EINVAL
);
2428 #endif /* CONFIG_OF */
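
/*
 * Illustrative device-tree fragment exercising the generic properties parsed
 * by dw_mci_parse_dt() above. The node name, address and values here are
 * hypothetical examples, not requirements; see the synopsys-dw-mshc binding
 * document for the authoritative description:
 *
 *	mmc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *		keep-power-in-suspend;
 *		supports-highspeed;
 *	};
 */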

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(host->vmmc)) {
		ret = PTR_ERR(host->vmmc);
		if (ret == -EPROBE_DEFER)
			goto err_clk_ciu;

		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
		host->vmmc = NULL;
	} else {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(host->dev,
					"regulator_enable fail: %d\n", ret);
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_regulator;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}
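
	/*
	 * For reference: HCON[9:7] is the hardware data-width parameter,
	 * encoded as 0 = 16-bit, 1 = 32-bit, 2 = 64-bit; any other value
	 * is reserved, hence the warn-and-default branch above.
	 */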

	/* Reset all blocks */
	if (!dw_mci_ctrl_all_reset(host))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
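
	/*
	 * For reference: SDMMC_SET_FIFOTH(m, r, t) packs the DMA multiple
	 * transaction size (MSize, bits [30:28]; 0x2 selects bursts of 8
	 * transfers), the RX watermark (bits [27:16]) and the TX watermark
	 * (bits [11:0]) into one FIFOTH value, so with a 32-word FIFO the
	 * write above programs RX_WMark = 15 and TX_WMark = 16.
	 */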

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;
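
	/*
	 * For reference: DATA_OFFSET is 0x100 on pre-2.40a IP; from 2.40a
	 * onwards the data FIFO window sits at DATA_240A_OFFSET (0x200), so
	 * PIO accesses must go through host->data_offset.
	 */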

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 "%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev, "attempted to initialize %d slots, "
			"but failed on all\n", host->num_slots);
		goto err_workqueue;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_regulator:
	if (host->vmmc)
		regulator_disable(host->vmmc);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
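
/*
 * Minimal sketch of how a platform glue driver hands a host off to
 * dw_mci_probe(), loosely modeled on dw_mmc-pltfm.c. The function name is
 * illustrative and error handling is abbreviated:
 *
 *	static int my_dw_mci_pltfm_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		host->irq = platform_get_irq(pdev, 0);
 *		if (host->irq < 0)
 *			return host->irq;
 *
 *		host->dev = &pdev->dev;
 *		host->irq_flags = 0;
 *		host->pdata = pdev->dev.platform_data;
 *
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *
 *		platform_set_drvdata(pdev, host);
 *		return dw_mci_probe(host);
 *	}
 */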

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
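
/*
 * The matching glue-driver teardown (again modeled on dw_mmc-pltfm.c, with
 * an illustrative name) is simply:
 *
 *	static int my_dw_mci_pltfm_remove(struct platform_device *pdev)
 *	{
 *		dw_mci_remove(platform_get_drvdata(pdev));
 *		return 0;
 *	}
 */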

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	if (!dw_mci_ctrl_all_reset(host))
		return -ENODEV;

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");