/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
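
/*
 * Note on the accessors above: the IP exposes 16-bit registers, with every
 * register offset scaled as (addr << bus_shift), so 32-bit quantities such
 * as CTL_STATUS or CTL_IRQ_MASK are accessed as two 16-bit halves - the low
 * half at "addr" and the high half at "addr + 2" (both before scaling).
 * With bus_shift = 1 (a 0x400-byte register window) all offsets double.
 */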
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
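
/*
 * Worked example for the divider search above (illustrative numbers, not
 * from a datasheet): with f_max = 24 MHz, f_min = f_max / 512 is roughly
 * 46.9 kHz. A request for 400 kHz lets "clock" double three times
 * (93.75 kHz, 187.5 kHz, 375 kHz) while the divider pattern shifts right,
 * ending at clk = 0x10000010, i.e. a /64 divider giving 375 kHz - the
 * fastest setting that does not exceed the request. Bit 0x100, OR-ed in
 * afterwards, appears to be the clock-enable bit of CTL_SD_CARD_CLK_CTL
 * (cf. tmio_mmc_clk_start()/tmio_mmc_clk_stop() below).
 */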
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/* request already finished */
	if (!mrq
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->mrq = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
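
/*
 * Illustration (derived from tmio_mmc_start_command() below, not from a
 * datasheet): the value written to CTL_SD_CMD is the command opcode OR-ed
 * with the masks above, e.g. a single-block read (CMD17 with an R1
 * response) becomes 0x11 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11.
 */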
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	/* Check the SDIO interrupt source first */
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
				TMIO_STAT_CMDRESPEND |
				TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
685 static void tmio_mmc_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
687 struct tmio_mmc_host
*host
= mmc_priv(mmc
);
691 pr_debug("request not null\n");
693 host
->last_req_ts
= jiffies
;
698 ret
= tmio_mmc_start_data(host
, mrq
->data
);
703 ret
= tmio_mmc_start_command(host
, mrq
->cmd
);
705 schedule_delayed_work(&host
->delayed_reset_work
,
706 msecs_to_jiffies(2000));
712 host
->force_pio
= false;
713 mrq
->cmd
->error
= ret
;
714 mmc_request_done(mmc
, mrq
);
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;
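	/*
	 * I.e. a 0x200-byte window yields bus_shift = 0 and a 0x400-byte
	 * window yields bus_shift = 1, matching the (addr << bus_shift)
	 * scaling in the sd_ctrl_* accessors at the top of this file.
	 */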
	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto unmap_ctl;

	_host->irq = ret;

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
		IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
	if (ret)
		goto unmap_ctl;

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

unmap_ctl:
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);
	free_irq(host->irq, host);
	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
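
/*
 * Usage sketch (illustrative only, not code from this file): a platform
 * glue driver would pair the two exports above roughly as follows, with
 * "my_pdev" and "my_pdata" standing in for its own platform device and
 * struct tmio_mmc_data:
 *
 *	struct tmio_mmc_host *host;
 *	int ret = tmio_mmc_host_probe(&host, my_pdev, my_pdata);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	tmio_mmc_host_remove(host);
 */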
MODULE_LICENSE("GPL v2");