/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
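/*
 * Thin wrappers around the optional DMA backend follow. Each call is guarded
 * by a host->dma_ops check, so the core also works in PIO-only configurations
 * where the glue driver registered no DMA implementation.
 */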
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}

#define CMDREQ_TIMEOUT	5000
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		usleep_range(10000, 11000);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		usleep_range(10000, 11000);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}
	/*
	 * Both HS400 and HS200/SDR104 set 200MHz, but some devices need to
	 * set 400MHz to distinguish the CPG settings in HS400.
	 */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 &&
	    new_clock == 200000000)
		new_clock = 400000000;

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
	    ((clk >> 22) & 0x1)) {
		if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
			clk |= 0xff;
		else
			clk &= ~0xff;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	tmio_mmc_clk_start(host);
}
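/*
 * Worked example for the divider search in tmio_mmc_set_clock() (numbers
 * assumed for illustration): with a 97.5 MHz source, clk_update() returns
 * 97500000 and 'clock' starts at 97500000 / 512 ~= 190 kHz, paired with
 * clk = 0x80000080 (the divide-by-512 setting). Each iteration halves the
 * divider (clk >>= 1) while doubling 'clock', stopping once one more
 * doubling would exceed new_clock; e.g. a 25 MHz request stops at
 * ~24.4 MHz after seven steps.
 */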
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * is request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
	 * us, so, have to check for IS_ERR(host->mrq)
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
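/*
 * Example (illustrative, not from a datasheet): a single-block read, CMD17
 * with an R1 response, would be encoded as
 *   c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ
 *     = 0x0011 | 0x0400 | 0x0800 | 0x1000 = 0x1c11
 * before being written to CTL_SD_CMD in tmio_mmc_start_command() below.
 */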
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8  *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even number */
	if (!(count & 0x1))
		return;

	/* if count was odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * driver and this function are assuming that
	 * it is used as little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
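/*
 * Example of the tail handling above (illustrative): a 7-byte transfer on a
 * 32-bit data port moves one full word via sd_ctrl_read32_rep(), then
 * count %= 4 leaves 3 bytes, which are staged through the local 'data' word
 * with memcpy(). On a 16-bit port the same 7 bytes become three 16-bit
 * accesses plus a single trailing byte via *buf8.
 */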
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in response from auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			}
		} else {
			if (host->force_pio || !host->chan_tx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
			}
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
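/*
 * Illustration of the response shuffling in tmio_mmc_cmd_irq(): for a
 * 136-bit R2 response the four 32-bit reads land least-significant-word
 * first in resp[3..0], so each word is shifted left by 8 and topped up
 * with the high byte of the next lower word, e.g.
 *   resp[0] = (resp[0] << 8) | (resp[1] >> 24);
 * which restores the byte order the MMC core expects.
 */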
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}
static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return false;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return ireg;
}
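/*
 * Top-level IRQ dispatch follows: card-detect events are checked first,
 * then SD/MMC command, PIO and data-end interrupts, and finally the
 * optional SDIO interrupt source; the first handler to claim the event
 * ends the dispatch.
 */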
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	if (__tmio_mmc_sdio_irq(host))
		return IRQ_HANDLED;

	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;
	host->force_pio = false;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	if (host->mmc->max_blk_count >= SZ_64K)
		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
	else
		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret == 0)
			set_bit(i, host->taps);

		usleep_range(1000, 1200);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}
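/*
 * Example of the tap bookkeeping above (illustrative numbers): with
 * tap_num = 8, the tuning command is sent 2 * 8 = 16 times, with
 * prepare_tuning() cycling through taps 0..7 twice. Every pass where
 * mmc_send_tuning() returns 0 sets the matching bit in host->taps, and
 * select_tuning() then picks the best position from that bitmap.
 */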
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	/* SCC error means retune, but executed command was still successful */
	if (host->check_scc_error && host->check_scc_error(host))
		mmc_retune_needed(host->mmc);

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq && !mrq->cmd->error) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);

	tmio_mmc_finish_request(host);
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		usleep_range(200, 300);
	}
	/*
	 * It seems, VccQ should be switched on after Vcc, this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		usleep_range(200, 300);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	usleep_range(140, 200);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);

	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}
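/*
 * Note on the host->mrq sentinel used above: while .set_ios() runs,
 * host->mrq holds ERR_PTR(-EBUSY) instead of NULL, so a concurrent
 * .request() sees a non-NULL value and fails fast with -EAGAIN rather
 * than starting a command mid-reconfiguration. A competing .set_ios()
 * flags itself with ERR_PTR(-EINTR), which is reported after the switch.
 */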
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		 TMIO_STAT_WRPROTECT);
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		  TMIO_STAT_SIGSTATE);
}
static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static int tmio_mmc_prepare_hs400_tuning(struct mmc_host *mmc,
					 struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->prepare_hs400_tuning)
		host->prepare_hs400_tuning(host);

	return 0;
}

static void tmio_mmc_hs400_downgrade(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hs400_downgrade)
		host->hs400_downgrade(host);
}

static void tmio_mmc_hs400_complete(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hs400_complete)
		host->hs400_complete(host);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
	.prepare_hs400_tuning = tmio_mmc_prepare_hs400_tuning,
	.hs400_downgrade = tmio_mmc_hs400_downgrade,
	.hs400_complete	= tmio_mmc_hs400_complete,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;
	int err;

	err = mmc_regulator_get_supply(mmc);
	if (err)
		return err;

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * try again.
	 * There is possibility that regulator has not been probed
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct mmc_host *mmc)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	/*
	 * DEPRECATED:
	 * For new platforms, please use "disable-wp" instead of
	 * "toshiba,mmc-wrprotect-disable"
	 */
	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
					  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *res;
	void __iomem *ctl;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctl = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctl))
		return ERR_CAST(ctl);

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->ctl = ctl;
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdata;
	host->ops = tmio_mmc_ops;
	mmc->ops = &host->ops;

	ret = mmc_of_parse(host->mmc);
	if (ret) {
		host = ERR_PTR(ret);
		goto free;
	}

	tmio_mmc_of_parse(pdev, mmc);

	platform_set_drvdata(pdev, host);

	return host;

free:
	mmc_free_host(mmc);

	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
	struct platform_device *pdev = _host->pdev;
	struct tmio_mmc_data *pdata = _host->pdata;
	struct mmc_host *mmc = _host->mmc;
	int ret;

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret)
			return ret;
	}

	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	/*
	 * Since swiotlb has memory size limitation, this will calculate
	 * the maximum size locally (because we don't have any APIs for it now)
	 * and check the current max_req_size. And then, this will update
	 * the max_req_size if needed as a workaround.
	 */
	if (swiotlb_max_segment()) {
		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

		if (mmc->max_req_size > max_size)
			mmc->max_req_size = max_size;
	}
	mmc->max_seg_size = mmc->max_req_size;
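	/*
	 * With the default swiotlb parameters (IO_TLB_SHIFT = 11,
	 * IO_TLB_SEGSIZE = 128), the cap computed above works out to
	 * (1 << 11) * 128 = 256 KiB per request.
	 */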
	if (mmc_can_gpio_ro(mmc))
		_host->ops.get_ro = mmc_gpio_get_ro;

	if (mmc_can_gpio_cd(mmc))
		_host->ops.get_cd = mmc_gpio_get_cd;

	_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems RuntimePM related yet we need further
	 * research. Since we are planning a PM overhaul anyway, let's enforce
	 * for now the device being active by enabling native hotplug always.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	if (_host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(_host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto remove_host;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	return 0;

remove_host:
	tmio_mmc_host_remove(_host);
	return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
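/*
 * Minimal usage sketch for a platform glue driver (hypothetical driver, not
 * part of this file) pairing tmio_mmc_host_alloc() with tmio_mmc_host_probe():
 *
 *	static int foo_mmc_probe(struct platform_device *pdev)
 *	{
 *		struct tmio_mmc_data *pdata = dev_get_platdata(&pdev->dev);
 *		struct tmio_mmc_host *host;
 *		int ret;
 *
 *		host = tmio_mmc_host_alloc(pdev, pdata);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		ret = tmio_mmc_host_probe(host);
 *		if (ret)
 *			tmio_mmc_host_free(host);
 *
 *		return ret;
 *	}
 */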
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	if (host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");