/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
static struct variant_data variant_arm = {
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.reversed_irq_handling	= true,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_u300 = {
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits	= 16,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_nomadik = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_ux500 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_ux500v2 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.start_err		= MCI_STARTBITERR,
};
static struct variant_data variant_stm32 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.datalength_bits	= 24,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup		= MCI_PWR_ON,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};
static struct variant_data variant_qcom = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable	= MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.blksz_datactrl4	= true,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_UP,
	.explicit_mclk_control	= true,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}
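/*
 * Explanatory note (not from the original source): the card_busy host op
 * is polled by the MMC core while a card holds DAT0 low after a command
 * with busy signalling; the check above simply samples the variant's
 * busy status bit under the host lock.
 */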
/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}
static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
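/*
 * Note (added for clarity, not from the original source): the three
 * register-write helpers above cache the last value written (clk_reg,
 * pwr_reg, datactrl_reg) so that redundant writes, and the delays
 * mmci_reg_delay() would have to impose between them, are skipped when
 * nothing has changed.
 */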
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}
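/*
 * Worked example of the two divider equations above (illustrative
 * numbers, assuming mclk = 100 MHz and a desired rate of 400 kHz):
 *
 *   ST variant: clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
 *               cclk = 100000000 / (248 + 2) = 400000 Hz
 *   PL180:      clkdiv = 100000000 / (2 * 400000) - 1 = 124,
 *               cclk = 100000000 / (2 * (124 + 1)) = 400000 Hz
 */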
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}
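/*
 * Note (added for clarity): with a single shared IRQ line
 * (host->singleirq), the MASK1 interrupt sources are mirrored into
 * MMCIMASK0 above so that mmci_irq() can demultiplex the PIO interrupts
 * from the command/data interrupts itself.
 */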
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (host->ops && host->ops->dma_setup)
		host->ops->dma_setup(host);
}
/*
 * This is used in probe or remove, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = host->dma_rx_channel;
	else
		chan = host->dma_tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}
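/*
 * Note on the slave configuration above (added for clarity):
 * fifohalfsize is kept in bytes, so fifohalfsize >> 2 expresses the
 * maxburst in 32-bit FIFO words; e.g. the ST variants' 8 * 4 byte
 * half-FIFO becomes a burst of 8 words per DMA request.
 */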
static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, thus do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		if (host->dma_desc_current == next->dma_desc)
			host->dma_desc_current = NULL;

		if (host->dma_current == next->dma_chan) {
			host->dma_in_progress = false;
			host->dma_current = NULL;
		}

		next->dma_desc = NULL;
		next->dma_chan = NULL;
		data->host_cookie = 0;
	}
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
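/*
 * For illustration (not from the original source), the three block size
 * encodings above with blksz = 512: the default variants program the
 * power-of-two exponent (9 << 4), blksz_datactrl16 variants (ux500v2)
 * program the raw size (512 << 16), and blksz_datactrl4 variants (qcom)
 * program the raw size shifted (512 << 4).
 */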
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);

	/*
	 * We need to be one of these interrupts to be considered worth
	 * handling. Note that we tag on any latent IRQs postponed
	 * due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses
	 * from the fifo range should be used
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
*host
, char *buffer
, unsigned int remain
, u32 status
)
1145 struct variant_data
*variant
= host
->variant
;
1146 void __iomem
*base
= host
->base
;
1150 unsigned int count
, maxcnt
;
1152 maxcnt
= status
& MCI_TXFIFOEMPTY
?
1153 variant
->fifosize
: variant
->fifohalfsize
;
1154 count
= min(remain
, maxcnt
);
1157 * SDIO especially may want to send something that is
1158 * not divisible by 4 (as opposed to card sectors
1159 * etc), and the FIFO only accept full 32-bit writes.
1160 * So compensate by adding +3 on the count, a single
1161 * byte become a 32bit write, 7 bytes will be two
1164 iowrite32_rep(base
+ MMCIFIFO
, ptr
, (count
+ 3) >> 2);
1172 status
= readl(base
+ MMCISTATUS
);
1173 } while (status
& MCI_TXFIFOHALFEMPTY
);
1175 return ptr
- buffer
;
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads on its own, we
		 * expect the pinctrl to be able to do that for us.
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}
static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variants (STM32) don't have the opendrain bit; nevertheless
	 * the pins can be set accordingly using pinctrl.
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too; on Qualcomm-like controllers, get the nearest
	 * minimum clock to 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
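	/*
	 * Worked numbers for the above (illustrative, not from the original
	 * source): with mclk at 100 MHz the ST divider gives an f_min of
	 * roughly 100 MHz / 257 ~ 389 kHz (f = mclk / (clkdiv + 2) at the
	 * maximum divider of 255), while the PL180 divider gives roughly
	 * 100 MHz / 512 ~ 195 kHz (f = mclk / (2 * (clkdiv + 1))).
	 */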
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT, or
	 * look up these descriptors named "cd" and "wp" right here, fail
	 * silently if these do not exist and proceed to try platform data.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_cd)) {
				ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
				if (ret)
					goto clk_disable;
			}
		}

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER)
				goto clk_disable;
			else if (gpio_is_valid(plat->gpio_wp)) {
				ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
				if (ret)
					goto clk_disable;
			}
		}
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}
#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}
static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
	{
		.data	= &variant_arm,
	},
	{
		.data	= &variant_arm_extended_fifo,
	},
	{
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_nomadik,
	},
	{
		.data	= &variant_nomadik,
	},
	{
		.data	= &variant_ux500,
	},
	{
		.data	= &variant_ux500v2,
	},
	{
		.data	= &variant_stm32,
	},
	/* Qualcomm variants */
	{
		.data	= &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");