/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#define CMDREQ_TIMEOUT	5000
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
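/*
 * Example (illustrative): a status word with CARD_INSERT and SIGSTATE set
 * prints as "status: <hex> = CARD_INSERT | SIGSTATE"; the " | " separator
 * is only emitted between names because of the i++ counter in the macro.
 */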
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
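/*
 * Editor's note on the register writes above: 0x0001 in CTL_TRANSACTION_CTL
 * switches the SDIO interrupt function on, and the mask leaves only
 * TMIO_SDIO_STAT_IOIRQ unmasked; the disable path reverses both writes.
 * The meaning of the remaining TRANSACTION_CTL bits is assumed from the
 * datasheet, not verified here.
 */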
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
				unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
		     new_clock >= (clock << 1);
		     clk >>= 1)
			clock <<= 1;

		/* 1/1 clock is option */
		if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
		    ((clk >> 22) & 0x1))
			clk |= 0xff;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
		msleep(10);
}
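/*
 * Worked example (editor's illustration): with f_max = 24 MHz, probe sets
 * f_min = f_max / 512 = 46875 Hz. A request for 400 kHz doubles "clock"
 * 46875 -> 93750 -> 187500 -> 375000 and stops, since the next doubling
 * (750 kHz) would exceed the request: the card runs at 375 kHz
 * (divide-by-64). The divider thus always rounds down to the nearest
 * achievable rate.
 */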
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
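/*
 * The delayed work above is effectively a software watchdog:
 * tmio_mmc_request() arms it for CMDREQ_TIMEOUT ms and
 * tmio_mmc_finish_request() cancels it, so it only fires when the
 * controller fails to deliver the expected interrupt.
 */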
/* Releases host->mrq and finishes the request; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}
/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
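/*
 * Example (editor's illustration): a single-block read, CMD17 with an R1
 * response, is encoded by tmio_mmc_start_command() below as
 * 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11.
 */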
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED when
			 * multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even number */
	if (!(count & 0x1))
		return;

	/* if count was odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * driver and this function are assuming that
	 * it is used as little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
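/*
 * Since count is clamped to data->blksz above, each RXRDY/TXRQ interrupt
 * moves at most one block and advances the scatterlist offset, so a
 * multi-block PIO transfer costs one interrupt per block.
 */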
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set; in these
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_ILL_FUNC)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/*
	 * This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
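/*
 * Example (editor's illustration): for a 136-bit R2 response the four
 * 32-bit reads above arrive with 8 unused top bits, so each resp[i] is
 * shifted up by 8 and takes the top 8 bits of the following word,
 * reassembling the response in the order the MMC core expects.
 */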
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				     int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			     TMIO_STAT_CMDRESPEND |
			     TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
		sdio_status |= 6;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
		struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_get_sync(mmc_dev(mmc));

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
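/*
 * host->mrq doubles as a state flag: NULL means idle, a real pointer means
 * a request is in flight, and an ERR_PTR() value (set in
 * tmio_mmc_set_ios() below) marks the host as temporarily unusable; hence
 * the fast -EAGAIN failure above when IS_ERR() is seen.
 */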
static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	if (!host->clk_enable)
		return -ENOTSUPP;

	ret = host->clk_enable(host->pdev, &mmc->f_max);
	if (!ret)
		mmc->f_min = mmc->f_max / 512;

	return ret;
}
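/*
 * The /512 floor mirrors tmio_mmc_set_clock(): its divider chain spans
 * divide-by-1 through divide-by-512, so f_min is simply the slowest rate
 * the divider can derive from f_max.
 */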
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems, VccQ should be switched on after Vcc, this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				unsigned char bus_width)
{
	switch (bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}
}
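/*
 * Editor's reading: the two option-register values differ only in bit 15,
 * which therefore appears to select 1-bit vs 4-bit bus width; the common
 * low bits (0x00e0) are left at their (assumed) datasheet defaults.
 */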
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);
	if (ret >= 0)
		return ret;

	pm_runtime_get_sync(mmc_dev(mmc));
	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return ret;
}
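/*
 * Note the double negation above: TMIO_STAT_WRPROTECT being set evidently
 * means "not write-protected", so get_ro returns 1 (read-only) only when
 * write-protect detection is enabled and that status bit is clear.
 */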
static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again.
	 * There is a possibility that the regulator has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;
	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl)
		return -ENOMEM;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  mmc->caps & MMC_CAP_NONREMOVABLE ||
				  mmc->slot.cd_irq >= 0);

	if (tmio_mmc_clk_update(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->clk_disable)
		host->clk_disable(host->pdev);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_update(host);

	if (host->clk_cache) {
		tmio_mmc_set_clock(host, host->clk_cache);
		tmio_mmc_clk_start(host);
	}

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
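/*
 * clk_cache (the last ios->clock value) gates both paths above: the clock
 * is stopped on suspend and restarted on resume only if the card was
 * actually being clocked when the host went idle.
 */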
MODULE_LICENSE("GPL v2");