/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 * Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/mmc/mmc.h>
#include <linux/of_device.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/mmc-davinci.h>
/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
#define DAVINCI_MMCETOK      0x4C
#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
#define DAVINCI_MMCCKC       0x54
#define DAVINCI_MMCTORC      0x58
#define DAVINCI_MMCTODC      0x5C
#define DAVINCI_MMCBLNC      0x60
#define DAVINCI_SDIOCTL      0x64
#define DAVINCI_SDIOST0      0x68
#define DAVINCI_SDIOIEN      0x6C
#define DAVINCI_SDIOIST      0x70
#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)	/* data done */
#define MMCST0_BSYDNE         BIT(1)	/* busy done */
#define MMCST0_RSPDNE         BIT(2)	/* command done */
#define MMCST0_TOUTRD         BIT(3)	/* data read timeout */
#define MMCST0_TOUTRS         BIT(4)	/* command response timeout */
#define MMCST0_CRCWR          BIT(5)	/* data write CRC error */
#define MMCST0_CRCRD          BIT(6)	/* data read CRC error */
#define MMCST0_CRCRS          BIT(7)	/* command response CRC error */
#define MMCST0_DXRDY          BIT(9)	/* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10)	/* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11)	/* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12)	/* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY           (1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK       (0x3F << 0)
#define MMCCMD_PPLEN          (1 << 7)
#define MMCCMD_BSYEXP         (1 << 8)
#define MMCCMD_RSPFMT_MASK    (3 << 9)
#define MMCCMD_RSPFMT_NONE    (0 << 9)
#define MMCCMD_RSPFMT_R1456   (1 << 9)
#define MMCCMD_RSPFMT_R2      (2 << 9)
#define MMCCMD_RSPFMT_R3      (3 << 9)
#define MMCCMD_DTRW           (1 << 11)
#define MMCCMD_STRMTP         (1 << 12)
#define MMCCMD_WDATX          (1 << 13)
#define MMCCMD_INITCK         (1 << 14)
#define MMCCMD_DCLR           (1 << 15)
#define MMCCMD_DMATRIG        (1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST    (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV    (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4    (0 << 3) /* access width of 4 bytes    */
#define MMCFIFOCTL_ACCWD_3    (1 << 3) /* access width of 3 bytes    */
#define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
#define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI       BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN       BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT         BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK	200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical.  nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT	((1 << 16) - 1)
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		"Polling transaction size threshold. Default = 128");

static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		"Maximum polling loop count. Default = 32");

static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
struct mmc_davinci_host {
	struct mmc_command *cmd;
	struct mmc_data *data;
	struct mmc_host *mmc;
	unsigned int mmc_input_clk;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u32 buffer_bytes_left;

	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;

	/* For PIO we walk scatterlists one segment at a time. */
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */

	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;

	/* Number of sg segments */

#ifdef CONFIG_CPU_FREQ
	struct notifier_block freq_transition;
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}
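
/*
 * Note: clamping buffer_bytes_left to bytes_left above keeps the PIO
 * path from walking past the end of the request when the scatterlist
 * segment describes more space than the transfer still needs.
 */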
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
					unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;
	/* NOTE: we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width (i.e. ACCWD) is 4 bytes.
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		} s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		/* FALLTHROUGH */
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
	    host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Setting whether command involves data transfer or not */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Setting whether stream or block transfer */
	if (cmd->flags & MMC_DATA_STREAM)
		cmd_reg |= MMCCMD_STRMTP;

	/* Setting whether data read or write */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* set Command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * the FIFO should be populated with 32 bytes, i.e. whatever the
	 * FIFO size is.
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;
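
	/*
	 * Small PIO transfers are finished by polling: the interrupt
	 * handler is called directly up to poll_loopcount times so short
	 * requests avoid interrupt latency.  Interrupts are enabled below
	 * only if the request is still outstanding afterwards.
	 */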
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
		}
	}

	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}
/*----------------------------------------------------------------------*/

/* DMA infrastructure */

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	struct dma_chan *sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->dma_rx;
	else
		sync_dev = host->dma_tx;

	dmaengine_terminate_all(sync_dev);
}

static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int ret = 0;

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_tx;
		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_tx,
				data->sg,
				host->sg_len,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA TX descriptor");
			ret = -ENOMEM;
			goto out;
		}
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_rx;
		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_rx,
				data->sg,
				host->sg_len,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA RX descriptor");
			ret = -ENOMEM;
			goto out;
		}
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
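
	/*
	 * Note: no dmaengine completion callback is registered for this
	 * descriptor; the end of the transfer is signalled by the
	 * controller's own DATDNE interrupt and handled in
	 * mmc_davinci_irq().
	 */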
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	int i;
	int mask = rw_threshold - 1;
	int ret = 0;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE));

	/* no individual DMA segment should need a partial FIFO */
	for (i = 0; i < host->sg_len; i++) {
		if (sg_dma_len(data->sg + i) & mask) {
			dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_WRITE)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			return -1;
		}
	}

	host->do_dma = 1;
	ret = mmc_davinci_send_dma_request(host, data);
static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	int r;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx =
		dma_request_slave_channel_compat(mask, edma_filter_fn,
				&host->txdma, mmc_dev(host->mmc), "tx");
	if (!host->dma_tx) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
		return -ENODEV;
	}

	host->dma_rx =
		dma_request_slave_channel_compat(mask, edma_filter_fn,
				&host->rxdma, mmc_dev(host->mmc), "rx");
	if (!host->dma_rx) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
		r = -ENODEV;
		goto free_master_write;
	}

	return 0;

free_master_write:
	dma_release_channel(host->dma_tx);

	return r;
}

/*----------------------------------------------------------------------*/
static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_STREAM) ? "stream" : "block",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO */
	switch (data->flags & MMC_DATA_WRITE) {
	case MMC_DATA_WRITE:
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
		break;

	default:
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
		break;
	}

	host->bytes_left = data->blocks * data->blksz;
	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO shouldn't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU Copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}
static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long timeout = jiffies + msecs_to_jiffies(900);
	u32 mmcst1 = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, timeout)) {
		mmcst1 = readl(host->base + DAVINCI_MMCST1);
		if (!(mmcst1 & MMCST1_BUSY))
			break;
	}
	if (mmcst1 & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}
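
/*
 * Push-pull clock divider selection, traced through the function below
 * with illustrative numbers (not taken from the original source): with a
 * 75 MHz module clock and a 25 MHz request, 75000000 / (2 * 25000000) - 1
 * gives a divisor of 0, which would yield 37.5 MHz; since that exceeds
 * the request the divisor is bumped to 1, giving a card clock of
 * 75 MHz / (2 * (1 + 1)) = 18.75 MHz, the fastest rate not above the
 * requested frequency.
 */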
static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;

	/* Convert ns to clock cycles */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;
		/* Ignore the init clock value that was passed, to improve
		 * interoperability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
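		/* i.e. 1000000 / (200000/1000) = 5000 ns per cycle at the
		 * 200 kHz open-drain init clock
		 */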
	} else {
		u32 temp;

		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;
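
		/* Stop the clock, program the new divider, then re-enable it. */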
		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
	}
}
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}
static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE)
			     ? DMA_TO_DEVICE
			     : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}
static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
					  int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;
	/* Handle the FIFO first when using PIO for data.  bytes_left will
	 * decrease to zero as I/O progresses, and status will read zero on
	 * later iterations because this controller's status register
	 * (MMCST0) reports each status bit only once and is cleared by the
	 * read, so this is not an unbounded loop.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly.  In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked.  This assumption is also taken when the
		 * MMCIM is first set.  Otherwise, writing to MMCIM after
		 * reading the status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
					"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE: this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		end_command = (int) host->cmd;
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}
static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_cd)
		return -ENOSYS;

	return config->get_cd(pdev->id);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_ro)
		return -ENOSYS;

	return config->get_ro(pdev->id);
}
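
/*
 * When SDIO interrupts are (re)enabled, the handler below samples DAT1
 * first: if the line is already low, an interrupt was signalled while it
 * was masked, so it is acknowledged and forwarded to the core right away
 * instead of only unmasking IOINTEN.
 */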
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

static struct mmc_host_ops mmc_davinci_ops = {
	.request	= mmc_davinci_request,
	.set_ios	= mmc_davinci_set_ios,
	.get_cd		= mmc_davinci_get_cd,
	.get_ro		= mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/
#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
					  unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}

static struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name	= "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name	= "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);

static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
static struct davinci_mmc_config
	*mmc_parse_pdata(struct platform_device *pdev)
{
	struct device_node *np;
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	const struct of_device_id *match =
		of_match_device(of_match_ptr(davinci_mmc_dt_ids), &pdev->dev);
	u32 data;

	np = pdev->dev.of_node;
	if (!np)
		return pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
		return NULL;
	}

	if (match)
		pdev->id_entry = match->data;

	if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
		dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");

	of_property_read_u32(np, "bus-width", &data);
	switch (data) {
	case 1:
	case 4:
	case 8:
		pdata->wires = data;
		break;
	default:
		pdata->wires = 1;
		dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
	}

	return pdata;
}

static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct davinci_mmc_config *pdata = NULL;
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret = 0, irq = 0;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	pdata = mmc_parse_pdata(pdev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "Couldn't get platform data\n");
		return -ENOENT;
	}

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq == NO_IRQ)
		goto out;

	ret = -EBUSY;
	mem_size = resource_size(r);
	mem = request_mem_region(r->start, mem_size, pdev->name);
	if (!mem)
		goto out;

	ret = -ENOMEM;
	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r)
		dev_warn(&pdev->dev, "RX DMA resource not specified\n");
	else
		host->rxdma = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r)
		dev_warn(&pdev->dev, "TX DMA resource not specified\n");
	else
		host->txdma = r->start;

	host->mem_res = mem;
	host->base = ioremap(mem->start, mem_size);
	if (!host->base)
		goto out;
	host->clk = clk_get(&pdev->dev, "MMCSDCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out;
	}
	clk_enable(host->clk);
	host->mmc_input_clk = clk_get_rate(host->clk);

	init_mmcsd_host(host);

	if (pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);

	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
		host->use_dma = 0;

	/* REVISIT:  someday, support IRQ-driven card detection.  */
	mmc->caps |= MMC_CAP_NEEDS_POLL;
	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs		= MAX_NR_SG;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size	= MAX_CCNT * rw_threshold;
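	/* with the default rw_threshold of 32 this is 65535 * 32 bytes,
	 * just under 2 MiB per segment
	 */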
	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size	= 4095;  /* BLEN is 12 bits */
	mmc->max_blk_count	= 65535; /* NBLK is 16 bits */
	mmc->max_req_size	= mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto out;

	ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
	if (ret)
		goto out;

	if (host->sdio_irq >= 0) {
		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
				  mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		host->use_dma ? "DMA" : "PIO",
		(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

out:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	if (host) {
		davinci_release_dma_channels(host);

		if (host->clk)
			clk_disable(host->clk);

		if (host->base)
			iounmap(host->base);
	}

	if (mmc)
		mmc_free_host(mmc);

	if (mem)
		release_resource(mem);

	dev_dbg(&pdev->dev, "probe err %d\n", ret);

	return ret;
}
static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	mmc_davinci_cpufreq_deregister(host);

	mmc_remove_host(host->mmc);
	free_irq(host->mmc_irq, host);
	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		free_irq(host->sdio_irq, host);

	davinci_release_dma_channels(host);

	clk_disable(host->clk);

	iounmap(host->base);

	release_resource(host->mem_res);

	mmc_free_host(host->mmc);

	return 0;
}
#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	writel(0, host->base + DAVINCI_MMCIM);
	mmc_davinci_reset_ctrl(host, 1);
	clk_disable(host->clk);

	return 0;
}

static int davinci_mmcsd_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	clk_enable(host->clk);
	mmc_davinci_reset_ctrl(host, 0);

	return 0;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend	= davinci_mmcsd_suspend,
	.resume		= davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.owner	= THIS_MODULE,
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = of_match_ptr(davinci_mmc_dt_ids),
	},
	.remove		= __exit_p(davinci_mmcsd_remove),
	.id_table	= davinci_mmc_devtype,
};

module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");