/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4 bit support), with further 4 bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>

#include "tmio_mmc.h"

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
        u32 clk = 0, clock;

        if (new_clock) {
                for (clock = host->mmc->f_min, clk = 0x80000080;
                        new_clock >= (clock << 1); clk >>= 1)
                        clock <<= 1;
                clk |= 0x100;
        }

        if (host->set_clk_div)
                host->set_clk_div(host->pdev, (clk >> 22) & 1);

        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
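
/*
 * A note on the divider scheme above, inferred from the loop rather than a
 * datasheet: clk is seeded with 0x80000080 and shifted right once per
 * doubling of the requested rate above f_min, so the divider select written
 * to CTL_SD_CARD_CLK_CTL steps through 0x80, 0x40, ..., 0x01 and finally 0.
 * Seed bit 31 reaches bit 22 exactly on the fastest setting, which appears
 * to be why (clk >> 22) & 1 is the value handed to the platform's
 * set_clk_div() hook.
 */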

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
        sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
        msleep(10);
        sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
        msleep(10);
}

static void reset(struct tmio_mmc_host *host)
{
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
        msleep(10);
}

static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq = host->mrq;

        host->mrq = NULL;
        host->cmd = NULL;
        host->data = NULL;

        mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
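
/*
 * Example of how these compose (a sketch based on tmio_mmc_start_command()
 * below, not taken from a datasheet): a multi-block read such as CMD18 with
 * its R1 response would be issued as
 *
 *	0x12 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI = 0x3c12
 *
 * written to CTL_SD_CMD after the argument has been placed in CTL_ARG_REG.
 */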

static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;

        /* Command 12 is handled by hardware */
        if (cmd->opcode == 12 && !cmd->arg) {
                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
                return 0;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1:   c |= RESP_R1;   break;
        case MMC_RSP_R1B:  c |= RESP_R1B;  break;
        case MMC_RSP_R2:   c |= RESP_R2;   break;
        case MMC_RSP_R3:   c |= RESP_R3;   break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

        /* FIXME - this seems to be ok commented out but the spec suggests
         * this bit should be set when issuing app commands.
         *	if(cmd->flags & MMC_FLAG_ACMD)
         *		c |= APP_CMD;
         */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
                        c |= TRANSFER_MULTI;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        enable_mmc_irqs(host, TMIO_MASK_CMD);

        /* Fire off the command */
        sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
              host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        if (data->flags & MMC_DATA_READ)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(host, &flags);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);

        return;
}
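
/*
 * Observation on the PIO path above (from the code, not a datasheet): the
 * data port is 16 bits wide, hence the count >> 1, so transfers proceed in
 * halfwords and an odd-length block would lose its trailing byte - another
 * instance of the "funny length" concern noted above.
 */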

static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given type
         *        which we don't do, as the chip can auto generate them.
         *        Perhaps we can be smarter about when to use auto CMD12 and
         *        only issue the auto request when we know this is the desired
         *        stop command, allowing fallback to the stop command the
         *        upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (!host->chan_rx)
                        disable_mmc_irqs(host, TMIO_MASK_READOP);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                if (!host->chan_tx)
                        disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop) {
                if (stop->opcode == 12 && !stop->arg)
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
                else
                        BUG();
        }

        tmio_mmc_finish_request(host);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;

        if (!data)
                return;

        if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
                /*
                 * Has all data been written out yet? Testing on SuperH showed
                 * that in most cases the first interrupt comes already with
                 * the BUSY status bit clear, but on some operations, like
                 * mount or in the beginning of a write / sync / umount, there
                 * is one DATAEND interrupt with the BUSY bit set, in which
                 * case waiting for one more interrupt fixes the problem.
                 */
                if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
                        disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tasklet_schedule(&host->dma_complete);
                }
        } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
                disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                tasklet_schedule(&host->dma_complete);
        } else {
                tmio_mmc_do_data_irq(host);
        }
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
        unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                return;
        }

        host->cmd = NULL;

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */

        for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }
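
        /*
         * Worked example of the reordering above, as I read it: the loop
         * fills resp[3]..resp[0] from four consecutive 32-bit response
         * registers, the top 8 bits of resp[0] are junk, and each word is
         * shifted left by 8 with the high byte of the next word pulled in,
         * leaving a 136-bit R2 (CID/CSD) response in resp[0..3] MSB-first
         * as the MMC core expects.
         */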

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
        if (host->data && !cmd->error) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (!host->chan_rx)
                                enable_mmc_irqs(host, TMIO_MASK_READOP);
                } else {
                        struct dma_chan *chan = host->chan_tx;
                        if (!chan)
                                enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        else
                                tasklet_schedule(&host->dma_issue);
                }
        } else {
                tmio_mmc_finish_request(host);
        }

        return;
}

static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        unsigned int ireg, irq_mask, status;

        pr_debug("MMC IRQ begin\n");

        status = sd_ctrl_read32(host, CTL_STATUS);
        irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
        ireg = status & TMIO_MASK_IRQ & ~irq_mask;

        pr_debug_status(status);
        pr_debug_status(ireg);

        if (!ireg) {
                disable_mmc_irqs(host, status & ~irq_mask);

                pr_warning("tmio_mmc: Spurious irq, disabling! "
                        "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
                pr_debug_status(status);

                goto out;
        }

        while (ireg) {
                /* Card insert / remove attempts */
                if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                        ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                                TMIO_STAT_CARD_REMOVE);
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                }

                /* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

                /* Command completion */
                if (ireg & TMIO_MASK_CMD) {
                        ack_mmc_irqs(host, TMIO_MASK_CMD);
                        tmio_mmc_cmd_irq(host, status);
                }

                /* Data transfer */
                if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                        ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                        tmio_mmc_pio_irq(host);
                }

                /* Data transfer completion */
                if (ireg & TMIO_STAT_DATAEND) {
                        ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tmio_mmc_data_irq(host);
                }

                /* Check status - keep going until we've handled it all */
                status = sd_ctrl_read32(host, CTL_STATUS);
                irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
                ireg = status & TMIO_MASK_IRQ & ~irq_mask;

                pr_debug("Status at end of loop: %08x\n", status);
                pr_debug_status(status);
        }
        pr_debug("MMC IRQ end\n");

out:
        return IRQ_HANDLED;
}
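
/*
 * Note on the handler above: it re-reads CTL_STATUS after each pass and
 * keeps dispatching until no unmasked event bits remain, so events that
 * arrive while earlier ones are being serviced are handled without waiting
 * for another interrupt.
 */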

#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
        /* Switch DMA mode on or off - SuperH specific? */
        sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}

static void tmio_dma_complete(void *arg)
{
        struct tmio_mmc_host *host = arg;

        dev_dbg(&host->pdev->dev, "Command completed\n");

        if (!host->data)
                dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
        else
                enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}
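
/*
 * Flow note, as I read the code: the dmaengine callback above does not
 * complete the request itself. It merely re-enables the DATAEND interrupt,
 * so completion still funnels through tmio_mmc_data_irq(), which schedules
 * the dma_complete tasklet to unmap the scatterlist and finish via
 * tmio_mmc_do_data_irq().
 */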

static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        int ret;

        ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_sglen = ret;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = tmio_dma_complete;
                desc->callback_param = host;
                host->cookie = desc->tx_submit(desc);
                if (host->cookie < 0) {
                        desc = NULL;
                        ret = host->cookie;
                } else {
                        chan->device->device_issue_pending(chan);
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, host->cookie, host->mrq);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);

                /* Fail this request, let above layers recover */
                host->mrq->cmd->error = ret;
                tmio_mmc_finish_request(host);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n",
                __func__, desc, host->cookie, host->sg_len);

        return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        int ret;

        ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_sglen = ret;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = tmio_dma_complete;
                desc->callback_param = host;
                host->cookie = desc->tx_submit(desc);
                if (host->cookie < 0) {
                        desc = NULL;
                        ret = host->cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, host->cookie, host->mrq);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);

                /* Fail this request, let above layers recover */
                host->mrq->cmd->error = ret;
                tmio_mmc_finish_request(host);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, host->cookie);

        return ret > 0 ? 0 : ret;
}

static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
                              struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        return tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        return tmio_mmc_start_dma_tx(host);
        }

        return 0;
}
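
/*
 * Design note on the two paths above: the Rx descriptor is submitted and
 * issued before the read command is sent, since the card starts pushing
 * data as soon as the transfer begins. The Tx descriptor is only submitted
 * here; issuing is deferred to the dma_issue tasklet, which
 * tmio_mmc_cmd_irq() schedules once the write command has completed.
 */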

static void tmio_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = host->chan_tx;

        chan->device->device_issue_pending(chan);
}

static void tmio_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
}

/* It might be necessary to make the filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                 struct tmio_mmc_data *pdata)
{
        host->cookie = -EINVAL;

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (pdata->dma) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_tx);
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_rx);
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx) {
                        dma_release_channel(host->chan_tx);
                        host->chan_tx = NULL;
                        return;
                }

                tasklet_init(&host->dma_complete, tmio_tasklet_fn,
                             (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn,
                             (unsigned long)host);

                tmio_mmc_enable_dma(host, true);
        }
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->cookie = -EINVAL;
}
#else
static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
                              struct mmc_data *data)
{
        return 0;
}

static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                 struct tmio_mmc_data *pdata)
{
        host->chan_tx = NULL;
        host->chan_rx = NULL;
}

static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
        struct mmc_data *data)
{
        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
        if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
                pr_err("%s: %d byte block unsupported in 4 bit mode\n",
                       mmc_hostname(host->mmc), data->blksz);
                return -EINVAL;
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        return tmio_mmc_start_dma(host, data);
}
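
/*
 * With CONFIG_TMIO_MMC_DMA unset, tmio_mmc_start_dma() above is a stub
 * returning 0, so this function just programs the block registers and the
 * transfer later proceeds by PIO once tmio_mmc_cmd_irq() unmasks the
 * RXRDY/TXRQ interrupts.
 */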

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        int ret;

        if (host->mrq)
                pr_debug("request not null\n");

        host->mrq = mrq;

        if (mrq->data) {
                ret = tmio_mmc_start_data(host, mrq->data);
                if (ret)
                        goto fail;
        }

        ret = tmio_mmc_start_command(host, mrq->cmd);
        if (!ret)
                return;

fail:
        mrq->cmd->error = ret;
        mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (ios->clock)
                tmio_mmc_set_clock(host, ios->clock);

        /* Power sequence - OFF -> ON -> UP */
        switch (ios->power_mode) {
        case MMC_POWER_OFF: /* power down SD bus */
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 0);
                tmio_mmc_clk_stop(host);
                break;
        case MMC_POWER_ON: /* power up SD bus */
                if (host->set_pwr)
                        host->set_pwr(host->pdev, 1);
                break;
        case MMC_POWER_UP: /* start bus clock */
                tmio_mmc_clk_start(host);
                break;
        }

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_1:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
                break;
        case MMC_BUS_WIDTH_4:
                sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
                break;
        }

        /* Let things settle. Delay taken from the WinCE driver */
        udelay(140);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct mfd_cell *cell = host->pdev->dev.platform_data;
        struct tmio_mmc_data *pdata = cell->driver_data;

        return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
                (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
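
/*
 * Note the inverted sense above: TMIO_STAT_WRPROTECT set apparently means
 * "write enabled", so the function returns 0 (writable) when the bit is
 * set, and reports read-only only when the bit is clear and the platform
 * has not set TMIO_MMC_WRPROTECT_DISABLE.
 */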

static const struct mmc_host_ops tmio_mmc_ops = {
        .request	= tmio_mmc_request,
        .set_ios	= tmio_mmc_set_ios,
        .get_ro		= tmio_mmc_get_ro,
};

#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret;

        ret = mmc_suspend_host(mmc);

        /* Tell MFD core it can disable us now. */
        if (!ret && cell->disable)
                cell->disable(dev);

        return ret;
}

static int tmio_mmc_resume(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret = 0;

        /* Tell the MFD core we are ready to be enabled */
        if (cell->resume) {
                ret = cell->resume(dev);
                if (ret)
                        goto out;
        }

        mmc_resume_host(mmc);

out:
        return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif

static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct tmio_mmc_data *pdata;
        struct resource *res_ctl;
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;
        int ret = -EINVAL;
        u32 irq_mask = TMIO_MASK_CMD;

        if (dev->num_resources != 2)
                goto out;

        res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!res_ctl)
                goto out;

        pdata = cell->driver_data;
        if (!pdata || !pdata->hclk)
                goto out;

        ret = -ENOMEM;

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
        if (!mmc)
                goto out;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->pdev = dev;
        platform_set_drvdata(dev, mmc);

        host->set_pwr = pdata->set_pwr;
        host->set_clk_div = pdata->set_clk_div;

        /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
        host->bus_shift = resource_size(res_ctl) >> 10;

        host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
        if (!host->ctl)
                goto host_free;

        mmc->ops = &tmio_mmc_ops;
        mmc->caps = MMC_CAP_4_BIT_DATA;
        mmc->caps |= pdata->capabilities;
        mmc->f_max = pdata->hclk;
        mmc->f_min = mmc->f_max / 512;
        if (pdata->ocr_mask)
                mmc->ocr_avail = pdata->ocr_mask;
        else
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        /* Tell the MFD core we are ready to be enabled */
        if (cell->enable) {
                ret = cell->enable(dev);
                if (ret)
                        goto unmap_ctl;
        }

        tmio_mmc_clk_stop(host);
        reset(host);

        ret = platform_get_irq(dev, 0);
        if (ret >= 0)
                host->irq = ret;
        else
                goto cell_disable;

        disable_mmc_irqs(host, TMIO_MASK_ALL);

        ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
                IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
        if (ret)
                goto cell_disable;

        /* See if we also get DMA */
        tmio_mmc_request_dma(host, pdata);

        mmc_add_host(mmc);

        pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
                (unsigned long)host->ctl, host->irq);

        /* Unmask the IRQs we want to know about */
        if (!host->chan_rx)
                irq_mask |= TMIO_MASK_READOP;
        if (!host->chan_tx)
                irq_mask |= TMIO_MASK_WRITEOP;
        enable_mmc_irqs(host, irq_mask);

        return 0;

cell_disable:
        if (cell->disable)
                cell->disable(dev);
unmap_ctl:
        iounmap(host->ctl);
host_free:
        mmc_free_host(mmc);
out:
        return ret;
}
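
/*
 * Note on the IRQ unmasking at the end of probe: READOP/WRITEOP (the PIO
 * data interrupts) are only added to the mask when the corresponding DMA
 * channel was not obtained, so a DMA-capable configuration never services
 * data by PIO interrupts.
 */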

static int __devexit tmio_mmc_remove(struct platform_device *dev)
{
        struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
        struct mmc_host *mmc = platform_get_drvdata(dev);

        platform_set_drvdata(dev, NULL);

        if (mmc) {
                struct tmio_mmc_host *host = mmc_priv(mmc);
                mmc_remove_host(mmc);
                tmio_mmc_release_dma(host);
                free_irq(host->irq, host);
                if (cell->disable)
                        cell->disable(dev);
                iounmap(host->ctl);
                mmc_free_host(mmc);
        }

        return 0;
}

/* ------------------- device registration ----------------------- */

static struct platform_driver tmio_mmc_driver = {
        .driver = {
                .name	= "tmio-mmc",
                .owner	= THIS_MODULE,
        },
        .probe		= tmio_mmc_probe,
        .remove		= __devexit_p(tmio_mmc_remove),
        .suspend	= tmio_mmc_suspend,
        .resume		= tmio_mmc_resume,
};

static int __init tmio_mmc_init(void)
{
        return platform_driver_register(&tmio_mmc_driver);
}

static void __exit tmio_mmc_exit(void)
{
        platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");