/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4-bit
 * support), with further 4-bit support from a later datasheet.
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
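
/*
 * Note: the sd_ctrl_read16/32() and sd_ctrl_write16/32() accessors used
 * throughout this file come from tmio_mmc.h; they scale register offsets
 * by host->bus_shift, so the same code serves controller variants with
 * different register spacing.
 */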
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
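
/*
 * Scatter-gather bookkeeping for PIO: host->sg_ptr and host->sg_off track
 * the current position in the request's sg list, while sg_orig remembers
 * the start of the list so that a DMA bounce buffer can be copied back to
 * it on completion (see tmio_mmc_check_bounce_buffer() below).
 */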
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
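
/*
 * .enable_sdio_irq() callback: unmasking the card's IOIRQ also requires
 * enabling SDIO transactions via CTL_TRANSACTION_CTL; masking reverses
 * both steps in the opposite order.
 */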
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
	}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
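
/*
 * A worked example of the divider walk above (illustrative, assuming
 * f_min = f_max / 512 as set up by tmio_mmc_clk_update()): clk starts at
 * 0x80000080, i.e. the slowest divider setting, and each doubling of
 * "clock" from f_min towards the requested rate shifts the divider bit
 * one position right; a request for f_max therefore ends with all low
 * divider bits clear, and only clk & 0x1ff reaches the register.
 */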
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
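
/*
 * The resource_size(res) > 0x100 tests in the three helpers above appear
 * to distinguish controller variants: only implementations with the larger
 * register window expose CTL_CLK_AND_WAIT_CTL and CTL_RESET_SDIO, so the
 * smaller ones skip those writes.
 */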
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), a .set_ios() call can preempt us, so we
	 * have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}
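
/*
 * tmio_mmc_reset_work() is the request watchdog: tmio_mmc_request() arms
 * delayed_reset_work with a 2 s timeout and tmio_mmc_finish_request()
 * cancels it, so this handler only runs when the hardware never raised a
 * completion interrupt for an active request.
 */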
/* Runs from the "done" work; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
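
/*
 * For example (derived from tmio_mmc_start_command() below), a single-block
 * read with an R1 response is issued as
 * c = opcode | RESP_R1 | DATA_PRESENT | TRANSFER_READ, and a multi-block
 * read additionally ORs in TRANSFER_MULTI.
 */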
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
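
/*
 * The bounce buffer below belongs to the DMA path (tmio_mmc_dma.c):
 * sg entries the DMA engine cannot handle directly are staged in
 * host->bounce_buf, and on read completion the data is copied back into
 * the original sg list.
 */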
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already arrives with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set; in these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
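
/*
 * To illustrate the response shuffle above: for a 136-bit (R2) response
 * the four registers are read into resp[3..0], then each word is shifted
 * left by 8 and topped up with the high byte of the next one, undoing the
 * controller's byte-skewed layout; short responses instead arrive in
 * resp[3] and are copied to resp[0] in the MMC_RSP_R3 branch.
 */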
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
	int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
	int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
	int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			     TMIO_STAT_CMDRESPEND |
			     TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
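
/*
 * Interrupt wiring: platforms with a single combined IRQ line register
 * tmio_mmc_irq(), which demultiplexes into the card-detect, SD-card and
 * SDIO handlers; platforms with separate lines can register
 * tmio_mmc_card_detect_irq(), tmio_mmc_sdcard_irq() and tmio_mmc_sdio_irq()
 * individually, which is why all four handlers are exported.
 */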
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
static int tmio_mmc_clk_update(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret;

	if (!pdata->clk_enable)
		return -ENOTSUPP;

	ret = pdata->clk_enable(host->pdev, &mmc->f_max);
	if (!ret)
		mmc->f_min = mmc->f_max / 512;

	return ret;
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no way to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * host->power toggles between false and true in both cases - whether
	 * or not the controller can be runtime-suspended during inactivity.
	 * But if the controller has to be kept on, the runtime-pm usage_count
	 * is kept positive, so no suspending actually takes place.
	 */
	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		if (host->power != TMIO_MMC_ON_RUN) {
			tmio_mmc_clk_update(mmc);
			pm_runtime_get_sync(dev);
			if (host->resuming) {
				tmio_mmc_reset(host);
				host->resuming = false;
			}
		}
		if (host->power == TMIO_MMC_OFF_STOP)
			tmio_mmc_reset(host);
		tmio_mmc_set_clock(host, ios->clock);
		if (host->power == TMIO_MMC_OFF_STOP)
			/* power up SD card and the bus */
			tmio_mmc_power_on(host, ios->vdd);
		host->power = TMIO_MMC_ON_RUN;
		/* start bus clock */
		tmio_mmc_clk_start(host);
	} else if (ios->power_mode != MMC_POWER_UP) {
		struct tmio_mmc_data *pdata = host->pdata;
		unsigned int old_power = host->power;

		if (old_power != TMIO_MMC_OFF_STOP) {
			if (ios->power_mode == MMC_POWER_OFF) {
				tmio_mmc_power_off(host);
				host->power = TMIO_MMC_OFF_STOP;
			} else {
				host->power = TMIO_MMC_ON_STOP;
			}
		}

		if (old_power == TMIO_MMC_ON_RUN) {
			tmio_mmc_clk_stop(host);
			pm_runtime_put(dev);
			if (pdata->clk_disable)
				pdata->clk_disable(host->pdev);
		}
	}

	if (host->power != TMIO_MMC_OFF_STOP) {
		switch (ios->bus_width) {
		case MMC_BUS_WIDTH_1:
			sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
			break;
		case MMC_BUS_WIDTH_4:
			sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
			break;
		}
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}
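
/*
 * host->power thus implements a small state machine: TMIO_MMC_OFF_STOP
 * (card and clock off) moves to TMIO_MMC_ON_RUN (powered, clock running)
 * on MMC_POWER_ON with a non-zero clock, drops to TMIO_MMC_ON_STOP
 * (powered, clock gated) when the clock is removed, and returns to
 * TMIO_MMC_OFF_STOP on MMC_POWER_OFF.
 */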
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);
	if (ret >= 0)
		return ret;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34;
	else if (pdata->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
	struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;
	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
	struct platform_device *pdev,
	struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		pdata->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto host_free;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 = pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	tmio_mmc_init_ocr(_host);

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  mmc->caps & MMC_CAP_NONREMOVABLE ||
				  mmc->slot.cd_irq >= 0);

	_host->power = TMIO_MMC_OFF_STOP;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	if (tmio_mmc_clk_update(mmc) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * There are 4 different scenarios for the card detection:
	 *  1) an external gpio irq handles the cd (best for power savings)
	 *  2) internal sdhi irq handles the cd
	 *  3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
	 *  4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
	 *
	 * While we increment the runtime PM counter for all scenarios when
	 * the mmc core activates us by calling an appropriate set_ios(), we
	 * must additionally ensure that in case 2) the tmio mmc hardware stays
	 * powered on during runtime for the card detection to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	ret = mmc_add_host(mmc);
	if (pdata->clk_disable)
		pdata->clk_disable(pdev);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
	}

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
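
/*
 * A minimal sketch of how a platform glue driver might use this API
 * (illustrative only; the "foo" names are hypothetical, but e.g.
 * sh_mobile_sdhi.c follows this pattern):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct tmio_mmc_data *mmc_data = foo_get_pdata(pdev);
 *		struct tmio_mmc_host *host;
 *		int ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
 *		if (ret < 0)
 *			return ret;
 *		ret = request_irq(platform_get_irq(pdev, 0), tmio_mmc_irq, 0,
 *				  dev_name(&pdev->dev), host);
 *		if (ret)
 *			tmio_mmc_host_remove(host);
 *		return ret;
 *	}
 */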
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_enable_dma(host, true);

	/* The MMC core will perform the complete set up */
	host->resuming = true;
	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
#endif	/* CONFIG_PM */
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");