// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-19 Renesas Electronics Corporation
 * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   Better Power management
 *   Handle MMC errors better
 *   double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                                      struct mmc_data *data)
{
        if (host->dma_ops)
                host->dma_ops->start(host, data);
}

static inline void tmio_mmc_end_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops && host->dma_ops->end)
                host->dma_ops->end(host);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (host->dma_ops)
                host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
                                        struct tmio_mmc_data *pdata)
{
        if (host->dma_ops) {
                host->dma_ops->request(host, pdata);
        } else {
                host->chan_tx = NULL;
                host->chan_rx = NULL;
        }
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops)
                host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops)
                host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
        if (host->dma_ops)
                host->dma_ops->dataend(host);
}
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
        sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
        sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
        sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
        host->sg_len = data->sg_len;
        host->sg_ptr = data->sg;
        host->sg_orig = data->sg;
        host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
        host->sg_ptr = sg_next(host->sg_ptr);
        host->sg_off = 0;
        return --host->sg_len;
}

#define CMDREQ_TIMEOUT  5000
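/*
 * CMDREQ_TIMEOUT is the software watchdog for a request, in milliseconds:
 * tmio_process_mrq() arms delayed_reset_work with this timeout, and
 * tmio_mmc_reset_work() fails the request with -ETIMEDOUT and resets the
 * controller if no completion interrupt has arrived in time.
 */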
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        if (enable && !host->sdio_irq_enabled) {
                u16 sdio_status;

                /* Keep device active while SDIO irq is enabled */
                pm_runtime_get_sync(mmc_dev(mmc));

                host->sdio_irq_enabled = true;
                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

                /* Clear obsolete interrupts before enabling */
                sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
                if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
                        sdio_status |= TMIO_SDIO_SETBITS_MASK;
                sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
        } else if (!enable && host->sdio_irq_enabled) {
                host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

                host->sdio_irq_enabled = false;
                pm_runtime_mark_last_busy(mmc_dev(mmc));
                pm_runtime_put_autosuspend(mmc_dev(mmc));
        }
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
        usleep_range(10000, 11000);
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
        usleep_range(10000, 11000);

        if (host->reset)
                host->reset(host);

        tmio_mmc_abort_dma(host);

        if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
                sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
        }
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  delayed_reset_work.work);
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        mrq = host->mrq;

        /*
         * is request already finished? Since we use a non-blocking
         * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
         * us, so, have to check for IS_ERR(host->mrq)
         */
        if (IS_ERR_OR_NULL(mrq) ||
            time_is_after_jiffies(host->last_req_ts +
                                  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        dev_warn(&host->pdev->dev,
                 "timeout waiting for hardware interrupt (CMD%u)\n",
                 mrq->cmd->opcode);

        if (host->data)
                host->data->error = -ETIMEDOUT;
        else if (host->cmd)
                host->cmd->error = -ETIMEDOUT;
        else
                mrq->cmd->error = -ETIMEDOUT;

        host->cmd = NULL;
        host->data = NULL;

        spin_unlock_irqrestore(&host->lock, flags);

        tmio_mmc_reset(host);

        /* Ready for new calls */
        host->mrq = NULL;
        mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
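/*
 * Example composition (illustrative, not an extra code path): a multi-block
 * read such as CMD18 goes out on the wire as
 *      opcode | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI
 * which is exactly what tmio_mmc_start_command() below assembles in 'c'.
 */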
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
                                  struct mmc_command *cmd)
{
        struct mmc_data *data = host->data;
        int c = cmd->opcode;

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE: c |= RESP_NONE; break;
        case MMC_RSP_R1:
        case MMC_RSP_R1_NO_CRC:
                           c |= RESP_R1;   break;
        case MMC_RSP_R1B:  c |= RESP_R1B;  break;
        case MMC_RSP_R2:   c |= RESP_R2;   break;
        case MMC_RSP_R3:   c |= RESP_R3;   break;
        default:
                pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
                return -EINVAL;
        }

        host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 * should be set when issuing app commands.
 *      if(cmd->flags & MMC_FLAG_ACMD)
 *              c |= APP_CMD;
 */
        if (data) {
                c |= DATA_PRESENT;
                if (data->blocks > 1) {
                        sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
                        c |= TRANSFER_MULTI;

                        /*
                         * Disable auto CMD12 at IO_RW_EXTENDED and
                         * SET_BLOCK_COUNT when doing multiple block transfer
                         */
                        if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
                            (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
                                c |= NO_CMD12_ISSUE;
                }
                if (data->flags & MMC_DATA_READ)
                        c |= TRANSFER_READ;
        }

        tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

        /* Fire off the command */
        sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
        sd_ctrl_write16(host, CTL_SD_CMD, c);

        return 0;
}
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
                                   unsigned short *buf,
                                   unsigned int count)
{
        int is_read = host->data->flags & MMC_DATA_READ;
        u8 *buf8;

        /*
         * Transfer the data
         */
        if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
                u32 data = 0;
                u32 *buf32 = (u32 *)buf;

                if (is_read)
                        sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
                                           count >> 2);
                else
                        sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
                                            count >> 2);

                /* if count was multiple of 4 */
                if (!(count & 0x3))
                        return;

                buf32 += count >> 2;
                count %= 4;

                if (is_read) {
                        sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
                        memcpy(buf32, &data, count);
                } else {
                        memcpy(&data, buf32, count);
                        sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
                }

                return;
        }

        if (is_read)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
                sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

        /* if count was even number */
        if (!(count & 0x1))
                return;

        /* if count was odd number */
        buf8 = (u8 *)(buf + (count >> 1));

        /*
         * FIXME
         *
         * driver and this function are assuming that
         * it is used as little endian
         */
        if (is_read)
                *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
        else
                sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;

        if (host->dma_on) {
                pr_err("PIO IRQ in DMA mode!\n");
                return;
        } else if (!data) {
                pr_debug("Spurious PIO IRQ\n");
                return;
        }

        sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
        buf = (unsigned short *)(sg_virt + host->sg_off);

        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
                count = data->blksz;

        pr_debug("count: %08x offset: %08x flags %08x\n",
                 count, host->sg_off, data->flags);

        /* Transfer the data */
        tmio_mmc_transfer_data(host, buf, count);

        host->sg_off += count;

        tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
}
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
        if (host->sg_ptr == &host->bounce_sg) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

                memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
        }
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
        struct mmc_data *data = host->data;
        struct mmc_command *stop;

        host->data = NULL;

        if (!data) {
                dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
                return;
        }
        stop = data->stop;

        /* FIXME - return correct transfer count on errors */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pr_debug("Completed data request\n");

        /*
         * FIXME: other drivers allow an optional stop command of any given type
         * which we don't do, as the chip can auto generate them.
         * Perhaps we can be smarter about when to use auto CMD12 and
         * only issue the auto request when we know this is the desired
         * stop command, allowing fallback to the stop command the
         * upper layers expect. For now, we do what works.
         */

        if (data->flags & MMC_DATA_READ) {
                if (host->dma_on)
                        tmio_mmc_check_bounce_buffer(host);
                dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
                        host->mrq);
        } else {
                dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
                        host->mrq);
        }

        if (stop && !host->mrq->sbc) {
                if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
                        dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
                                stop->opcode, stop->arg);

                /* fill in response from auto CMD12 */
                stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

                sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
        }

        schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
        struct mmc_data *data;

        spin_lock(&host->lock);
        data = host->data;

        if (!data)
                goto out;

        if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
            stat & TMIO_STAT_TXUNDERRUN)
                data->error = -EILSEQ;
        if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
                u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
                bool done = false;

                /*
                 * Has all data been written out yet? Testing on SuperH showed,
                 * that in most cases the first interrupt comes already with the
                 * BUSY status bit clear, but on some operations, like mount or
                 * in the beginning of a write / sync / umount, there is one
                 * DATAEND interrupt with the BUSY bit set, in this cases
                 * waiting for one more interrupt fixes the problem.
                 */
                if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
                        if (status & TMIO_STAT_SCLKDIVEN)
                                done = true;
                } else {
                        if (!(status & TMIO_STAT_CMD_BUSY))
                                done = true;
                }

                if (done) {
                        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                        tmio_mmc_dataend_dma(host);
                }
        } else if (host->dma_on && (data->flags & MMC_DATA_READ)) {
                tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
                tmio_mmc_dataend_dma(host);
        } else {
                tmio_mmc_do_data_irq(host);
                tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
        }
out:
        spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i, addr;

        spin_lock(&host->lock);

        if (!host->cmd) {
                pr_debug("Spurious CMD irq\n");
                goto out;
        }

        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
         */

        for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
                cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
                cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
                cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
                cmd->resp[3] <<= 8;
        } else if (cmd->flags & MMC_RSP_R3) {
                cmd->resp[0] = cmd->resp[3];
        }

        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
        else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
                 stat & TMIO_STAT_STOPBIT_ERR ||
                 stat & TMIO_STAT_CMD_IDX_ERR)
                cmd->error = -EILSEQ;

        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
        if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (!host->dma_on) {
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
                        } else {
                                tmio_mmc_disable_mmc_irqs(host,
                                                          TMIO_MASK_READOP);
                                tasklet_schedule(&host->dma_issue);
                        }
                } else {
                        if (!host->dma_on) {
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
                        } else {
                                tmio_mmc_disable_mmc_irqs(host,
                                                          TMIO_MASK_WRITEOP);
                                tasklet_schedule(&host->dma_issue);
                        }
                }
        } else {
                schedule_work(&host->done);
        }

out:
        spin_unlock(&host->lock);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
                                       int ireg, int status)
{
        struct mmc_host *mmc = host->mmc;

        /* Card insert / remove attempts */
        if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
                                      TMIO_STAT_CARD_REMOVE);
                if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
                     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
                    !work_pending(&mmc->detect.work))
                        mmc_detect_change(host->mmc, msecs_to_jiffies(100));
                return true;
        }

        return false;
}
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
                                  int status)
{
        /* Command completion */
        if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
                                      TMIO_STAT_CMDTIMEOUT);
                tmio_mmc_cmd_irq(host, status);
                return true;
        }

        /* Data transfer */
        if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
                tmio_mmc_pio_irq(host);
                return true;
        }

        /* Data transfer completion */
        if (ireg & TMIO_STAT_DATAEND) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
                tmio_mmc_data_irq(host, status);
                return true;
        }

        return false;
}
static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct tmio_mmc_data *pdata = host->pdata;
        unsigned int ireg, status;
        unsigned int sdio_status;

        if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
                return false;

        status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
        ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

        sdio_status = status & ~TMIO_SDIO_MASK_ALL;
        if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
                sdio_status |= TMIO_SDIO_SETBITS_MASK;

        sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

        if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
                mmc_signal_sdio_irq(mmc);

        return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
        struct tmio_mmc_host *host = devid;
        unsigned int ireg, status;

        status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
        ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

        /* Clear the status except the interrupt status */
        sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

        if (__tmio_mmc_card_detect_irq(host, ireg, status))
                return IRQ_HANDLED;
        if (__tmio_mmc_sdcard_irq(host, ireg, status))
                return IRQ_HANDLED;

        if (__tmio_mmc_sdio_irq(host))
                return IRQ_HANDLED;

        return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
                               struct mmc_data *data)
{
        struct tmio_mmc_data *pdata = host->pdata;

        pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
                 data->blksz, data->blocks);

        /* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
            host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
                int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

                if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
                        pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
                               mmc_hostname(host->mmc), data->blksz);
                        return -EINVAL;
                }
        }

        tmio_mmc_init_sg(host, data);
        host->data = data;
        host->dma_on = false;

        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
        if (host->mmc->max_blk_count >= SZ_64K)
                sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
        else
                sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

        tmio_mmc_start_dma(host, data);

        return 0;
}
static void tmio_process_mrq(struct tmio_mmc_host *host,
                             struct mmc_request *mrq)
{
        struct mmc_command *cmd;
        int ret;

        if (mrq->sbc && host->cmd != mrq->sbc) {
                cmd = mrq->sbc;
        } else {
                cmd = mrq->cmd;
                if (mrq->data) {
                        ret = tmio_mmc_start_data(host, mrq->data);
                        if (ret)
                                goto fail;
                }
        }

        ret = tmio_mmc_start_command(host, cmd);
        if (ret)
                goto fail;

        schedule_delayed_work(&host->delayed_reset_work,
                              msecs_to_jiffies(CMDREQ_TIMEOUT));
        return;

fail:
        host->mrq = NULL;
        mrq->cmd->error = ret;
        mmc_request_done(host->mmc, mrq);
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->mrq) {
                pr_debug("request not null\n");
                if (IS_ERR(host->mrq)) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        mrq->cmd->error = -EAGAIN;
                        mmc_request_done(mmc, mrq);
                        return;
                }
        }

        host->last_req_ts = jiffies;
        wmb();
        host->mrq = mrq;

        spin_unlock_irqrestore(&host->lock, flags);

        tmio_process_mrq(host, mrq);
}
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
        struct mmc_request *mrq;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        tmio_mmc_end_dma(host);

        mrq = host->mrq;
        if (IS_ERR_OR_NULL(mrq)) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        /* If not SET_BLOCK_COUNT, clear old data */
        if (host->cmd != mrq->sbc) {
                host->cmd = NULL;
                host->data = NULL;
                host->mrq = NULL;
        }

        cancel_delayed_work(&host->delayed_reset_work);

        spin_unlock_irqrestore(&host->lock, flags);

        if (mrq->cmd->error || (mrq->data && mrq->data->error)) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_MASK_IRQ); /* Clear all */
                tmio_mmc_abort_dma(host);
        }

        /* Error means retune, but executed command was still successful */
        if (host->check_retune && host->check_retune(host))
                mmc_retune_needed(host->mmc);

        /* If SET_BLOCK_COUNT, continue with main command */
        if (host->mrq && !mrq->cmd->error) {
                tmio_process_mrq(host, mrq);
                return;
        }

        if (host->fixup_request)
                host->fixup_request(host, mrq);

        mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_done_work(struct work_struct *work)
{
        struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
                                                  done);
        tmio_mmc_finish_request(host);
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
        struct mmc_host *mmc = host->mmc;
        int ret = 0;

        /* .set_ios() is returning void, so, no chance to report an error */

        if (host->set_pwr)
                host->set_pwr(host->pdev, 1);

        if (!IS_ERR(mmc->supply.vmmc)) {
                ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                /*
                 * Attention: empiric value. With a b43 WiFi SDIO card this
                 * delay proved necessary for reliable card-insertion probing.
                 * 100us were not enough. Is this the same 140us delay, as in
                 * tmio_mmc_set_ios()?
                 */
                usleep_range(200, 300);
        }
        /*
         * It seems, VccQ should be switched on after Vcc, this is also what the
         * omap_hsmmc.c driver does.
         */
        if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
                ret = regulator_enable(mmc->supply.vqmmc);
                usleep_range(200, 300);
        }

        if (ret < 0)
                dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
                        ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
        struct mmc_host *mmc = host->mmc;

        if (!IS_ERR(mmc->supply.vqmmc))
                regulator_disable(mmc->supply.vqmmc);

        if (!IS_ERR(mmc->supply.vmmc))
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

        if (host->set_pwr)
                host->set_pwr(host->pdev, 0);
}
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
                                   unsigned char bus_width)
{
        u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
                                & ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

        /* reg now applies to MMC_BUS_WIDTH_4 */
        if (bus_width == MMC_BUS_WIDTH_1)
                reg |= CARD_OPT_WIDTH;
        else if (bus_width == MMC_BUS_WIDTH_8)
                reg |= CARD_OPT_WIDTH8;

        sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
static unsigned int tmio_mmc_get_timeout_cycles(struct tmio_mmc_host *host)
{
        u16 val = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);

        val = (val & CARD_OPT_TOP_MASK) >> CARD_OPT_TOP_SHIFT;
        return 1 << (13 + val);
}
static void tmio_mmc_max_busy_timeout(struct tmio_mmc_host *host)
{
        unsigned int clk_rate = host->mmc->actual_clock ?: host->mmc->f_max;

        host->mmc->max_busy_timeout = host->get_timeout_cycles(host) /
                                      (clk_rate / MSEC_PER_SEC);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct device *dev = &host->pdev->dev;
        unsigned long flags;

        mutex_lock(&host->ios_lock);

        spin_lock_irqsave(&host->lock, flags);
        if (host->mrq) {
                if (IS_ERR(host->mrq)) {
                        dev_dbg(dev,
                                "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
                                current->comm, task_pid_nr(current),
                                ios->clock, ios->power_mode);
                        host->mrq = ERR_PTR(-EINTR);
                } else {
                        dev_dbg(dev,
                                "%s.%d: CMD%u active since %lu, now %lu!\n",
                                current->comm, task_pid_nr(current),
                                host->mrq->cmd->opcode, host->last_req_ts,
                                jiffies);
                }
                spin_unlock_irqrestore(&host->lock, flags);

                mutex_unlock(&host->ios_lock);
                return;
        }

        host->mrq = ERR_PTR(-EBUSY);

        spin_unlock_irqrestore(&host->lock, flags);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                tmio_mmc_power_off(host);
                /* For R-Car Gen2+, we need to reset SDHI specific SCC */
                if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
                        host->reset(host);
                host->set_clock(host, 0);
                break;
        case MMC_POWER_UP:
                tmio_mmc_power_on(host, ios->vdd);
                host->set_clock(host, ios->clock);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        case MMC_POWER_ON:
                host->set_clock(host, ios->clock);
                tmio_mmc_set_bus_width(host, ios->bus_width);
                break;
        }

        if (host->pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT)
                tmio_mmc_max_busy_timeout(host);

        /* Let things settle. delay taken from winCE driver */
        usleep_range(140, 200);
        if (PTR_ERR(host->mrq) == -EINTR)
                dev_dbg(&host->pdev->dev,
                        "%s.%d: IOS interrupted: clk %u, mode %u",
                        current->comm, task_pid_nr(current),
                        ios->clock, ios->power_mode);
        host->mrq = NULL;

        host->clk_cache = ios->clock;

        mutex_unlock(&host->ios_lock);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
                 TMIO_STAT_WRPROTECT);
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);

        return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
                  TMIO_STAT_SIGSTATE);
}
static int tmio_multi_io_quirk(struct mmc_card *card,
                               unsigned int direction, int blk_size)
{
        struct tmio_mmc_host *host = mmc_priv(card->host);

        if (host->multi_io_quirk)
                return host->multi_io_quirk(card, direction, blk_size);

        return blk_size;
}
static struct mmc_host_ops tmio_mmc_ops = {
        .request        = tmio_mmc_request,
        .set_ios        = tmio_mmc_set_ios,
        .get_ro         = tmio_mmc_get_ro,
        .get_cd         = tmio_mmc_get_cd,
        .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
        .multi_io_quirk = tmio_multi_io_quirk,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
        struct tmio_mmc_data *pdata = host->pdata;
        struct mmc_host *mmc = host->mmc;
        int err;

        err = mmc_regulator_get_supply(mmc);
        if (err)
                return err;

        /* use ocr_mask if no regulator */
        if (!mmc->ocr_avail)
                mmc->ocr_avail = pdata->ocr_mask;

        /*
         * try again.
         * There is possibility that regulator has not been probed
         */
        if (!mmc->ocr_avail)
                return -EPROBE_DEFER;

        return 0;
}
static void tmio_mmc_of_parse(struct platform_device *pdev,
                              struct mmc_host *mmc)
{
        const struct device_node *np = pdev->dev.of_node;

        if (!np)
                return;

        /*
         * DEPRECATED:
         * For new platforms, please use "disable-wp" instead of
         * "toshiba,mmc-wrprotect-disable"
         */
        if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
                mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
                                          struct tmio_mmc_data *pdata)
{
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;
        void __iomem *ctl;
        int ret;

        ctl = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctl))
                return ERR_CAST(ctl);

        mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
        if (!mmc)
                return ERR_PTR(-ENOMEM);

        host = mmc_priv(mmc);
        host->ctl = ctl;
        host->mmc = mmc;
        host->pdev = pdev;
        host->pdata = pdata;
        host->ops = tmio_mmc_ops;
        mmc->ops = &host->ops;

        ret = mmc_of_parse(host->mmc);
        if (ret) {
                host = ERR_PTR(ret);
                goto free;
        }

        tmio_mmc_of_parse(pdev, mmc);

        platform_set_drvdata(pdev, host);

        return host;

free:
        mmc_free_host(mmc);

        return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
        mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
        struct platform_device *pdev = _host->pdev;
        struct tmio_mmc_data *pdata = _host->pdata;
        struct mmc_host *mmc = _host->mmc;
        int ret;

        /*
         * Check the sanity of mmc->f_min to prevent host->set_clock() from
         * looping forever...
         */
        if (mmc->f_min == 0)
                return -EINVAL;

        if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
                _host->write16_hook = NULL;

        if (pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT && !_host->get_timeout_cycles)
                _host->get_timeout_cycles = tmio_mmc_get_timeout_cycles;

        _host->set_pwr = pdata->set_pwr;

        ret = tmio_mmc_init_ocr(_host);
        if (ret < 0)
                return ret;

        /*
         * Look for a card detect GPIO, if it fails with anything
         * else than a probe deferral, just live without it.
         */
        ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;

        mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
        mmc->caps2 |= pdata->capabilities2;
        mmc->max_segs = pdata->max_segs ? : 32;
        mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
        mmc->max_blk_count = pdata->max_blk_count ? :
                (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
        mmc->max_req_size = min_t(size_t,
                                  mmc->max_blk_size * mmc->max_blk_count,
                                  dma_max_mapping_size(&pdev->dev));
        mmc->max_seg_size = mmc->max_req_size;

        if (mmc_can_gpio_ro(mmc))
                _host->ops.get_ro = mmc_gpio_get_ro;

        if (mmc_can_gpio_cd(mmc))
                _host->ops.get_cd = mmc_gpio_get_cd;

        _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
                                  mmc->caps & MMC_CAP_NEEDS_POLL ||
                                  !mmc_card_is_removable(mmc));

        /*
         * On Gen2+, eMMC with NONREMOVABLE currently fails because native
         * hotplug gets disabled. It seems RuntimePM related yet we need further
         * research. Since we are planning a PM overhaul anyway, let's enforce
         * for now the device being active by enabling native hotplug always.
         */
        if (pdata->flags & TMIO_MMC_MIN_RCAR2)
                _host->native_hotplug = true;

        /*
         * While using internal tmio hardware logic for card detection, we need
         * to ensure it stays powered for it to work.
         */
        if (_host->native_hotplug)
                pm_runtime_get_noresume(&pdev->dev);

        _host->sdio_irq_enabled = false;
        if (pdata->flags & TMIO_MMC_SDIO_IRQ)
                _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

        _host->set_clock(_host, 0);
        tmio_mmc_reset(_host);

        _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
        tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

        if (_host->native_hotplug)
                tmio_mmc_enable_mmc_irqs(_host,
                                TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

        spin_lock_init(&_host->lock);
        mutex_init(&_host->ios_lock);

        /* Init delayed work for request timeouts */
        INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
        INIT_WORK(&_host->done, tmio_mmc_done_work);

        /* See if we also get DMA */
        tmio_mmc_request_dma(_host, pdata);

        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = mmc_add_host(mmc);
        if (ret)
                goto remove_host;

        dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
        pm_runtime_put(&pdev->dev);

        return 0;

remove_host:
        pm_runtime_put_noidle(&pdev->dev);
        tmio_mmc_host_remove(_host);
        return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
        struct platform_device *pdev = host->pdev;
        struct mmc_host *mmc = host->mmc;

        pm_runtime_get_sync(&pdev->dev);

        if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
                sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

        dev_pm_qos_hide_latency_limit(&pdev->dev);

        mmc_remove_host(mmc);
        cancel_work_sync(&host->done);
        cancel_delayed_work_sync(&host->delayed_reset_work);
        tmio_mmc_release_dma(host);
        tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

        if (host->native_hotplug)
                pm_runtime_put_noidle(&pdev->dev);

        pm_runtime_disable(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
        if (!host->clk_enable)
                return -ENOTSUPP;

        return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
        if (host->clk_disable)
                host->clk_disable(host);
}
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
        struct tmio_mmc_host *host = dev_get_drvdata(dev);

        tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

        if (host->clk_cache)
                host->set_clock(host, 0);

        tmio_mmc_clk_disable(host);

        return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
        struct tmio_mmc_host *host = dev_get_drvdata(dev);

        tmio_mmc_clk_enable(host);
        tmio_mmc_reset(host);

        if (host->clk_cache)
                host->set_clock(host, host->clk_cache);

        if (host->native_hotplug)
                tmio_mmc_enable_mmc_irqs(host,
                                TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

        tmio_mmc_enable_dma(host, true);

        mmc_retune_needed(host->mmc);

        return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");