/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)
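/*
 * Illustrative output (exact names depend on which TMIO_STAT_* bits are
 * set): a status word with CARD_INSERT and CMDRESPEND set would print
 * "status: <value> = CARD_INSERT | CMDRESPEND".
 */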
static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
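/*
 * How the register value is built, inferred from the code above rather
 * than a datasheet: the low byte of clk is a one-hot divisor select,
 * seeded at 0x80 (divide-by-512, matching f_min = f_max / 512) and
 * shifted right once per doubling of the target clock; bit 0x100 appears
 * to be the same bus-clock enable bit that tmio_mmc_clk_start() and
 * tmio_mmc_clk_stop() toggle below.
 */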
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
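/*
 * Note, inferred from the resource_size() checks above: a control window
 * larger than 0x100 indicates a variant that also exposes the SDIO and
 * clock-and-wait registers, so CTL_CLK_AND_WAIT_CTL and CTL_RESET_SDIO
 * are only touched on those devices.
 */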
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}
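/*
 * host->mrq doubles as a state flag: .set_ios() parks ERR_PTR(-EBUSY) or
 * ERR_PTR(-EINTR) in it while reconfiguring the controller, which is why
 * the request paths above and below test IS_ERR_OR_NULL() rather than
 * just NULL.
 */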
/* Runs from the "done" work; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
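/*
 * Worked example (illustrative): a single-block read, CMD17 with an R1
 * response, is encoded as 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ =
 * 0x1c11, built up in tmio_mmc_start_command() below.
 */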
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
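/*
 * Completion is deferred to the "done" work (tmio_mmc_done_work) so that
 * mmc_request_done() runs outside interrupt context.
 */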
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set; in these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}
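	/*
	 * Illustrative walk-through: for a 136-bit R2 response (CID/CSD),
	 * the loop above reads the four RESPONSE registers into
	 * resp[3]..resp[0]; each word is then shifted left by 8 and the
	 * top byte of the following word pulled in, discarding the
	 * controller's pad byte and restoring the layout the MMC core
	 * expects.
	 */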
	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				     int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			     TMIO_STAT_CMDRESPEND |
			     TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		/* paired with the 2 s timeout check in tmio_mmc_reset_work() */
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * pdata->power == false only if COLD_CD is available, otherwise only
	 * in short time intervals during probing or resuming
	 */
	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		if (!pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		tmio_mmc_set_clock(host, ios->clock);
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		/* start bus clock */
		tmio_mmc_clk_start(host);
	} else if (ios->power_mode != MMC_POWER_UP) {
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
		    pdata->power) {
			pdata->power = false;
			pm_runtime_put(&host->pdev->dev);
		}
		tmio_mmc_clk_stop(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	/*
	 * TMIO_STAT_WRPROTECT is apparently set while the card is writable
	 * (an active-low write-protect input), hence the negation.
	 */
	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;
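	/* e.g. a 0x200 byte window yields bus_shift 0, a 0x400 byte one yields 1 */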
	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
		pdata->power = true;
		pm_runtime_get_noresume(&pdev->dev);
	}

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so, our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
		pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);
int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	host->pm_global = true;
	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	if (host->pm_global) {
		/* Runtime PM resume callback didn't run */
		tmio_mmc_reset(host);
		tmio_mmc_enable_dma(host, true);
		host->pm_global = false;
	}

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);
	tmio_mmc_enable_dma(host, true);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		if (!mmc->card)
			tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}
	host->pm_global = false;

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
MODULE_LICENSE("GPL v2");