/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/sizes.h>
#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
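
/*
 * fmax caps the bus clock handed to the MMC core (see the f_max logic in
 * mmci_probe() below): when neither platform data nor the device tree
 * supplies a maximum frequency, this module parameter (default 515633 Hz)
 * is used instead. As an illustration only, assuming the object is built
 * as "mmci", a built-in driver could be overridden from the kernel command
 * line with something like "mmci.fmax=26000000"; the parameter is declared
 * with module_param() at the bottom of this file.
 */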
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *		      datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 * @pwrreg_clkgate: the MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on DAT0 is supported
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
};
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
};
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}
/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
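
		/*
		 * Worked example (illustrative numbers): with mclk at
		 * 100 MHz and 400 kHz desired, the ST divider above gives
		 * DIV_ROUND_UP(100000000, 400000) - 2 = 248, so
		 * cclk = 100 MHz / (248 + 2) = 400 kHz, while the PL180
		 * formula gives 100000000 / (2 * 400000) - 1 = 124 and
		 * cclk = 100 MHz / (2 * (124 + 1)) = 400 kHz.
		 */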
		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (plat && plat->dma_filter) {
		if (!host->dma_rx_channel && plat->dma_rx_param) {
			host->dma_rx_channel = dma_request_channel(mask,
							plat->dma_filter,
							plat->dma_rx_param);
			/* E.g. if no DMA hardware is present */
			if (!host->dma_rx_channel)
				dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
		}

		if (!host->dma_tx_channel && plat->dma_tx_param) {
			host->dma_tx_channel = dma_request_channel(mask,
							plat->dma_filter,
							plat->dma_tx_param);
			if (!host->dma_tx_channel)
				dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
		}
	}

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
/*
 * This is only called from the probe and remove paths, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}
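
/*
 * A note on the burst sizing in the slave config below: fifohalfsize is
 * in bytes, so for example a variant with fifohalfsize = 8 * 4 (32 bytes)
 * asks the DMA engine for bursts of 32 >> 2 = 8 words, matching the FIFO
 * half-full/half-empty trigger level.
 */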
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
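
/*
 * A short note on the pre_req/post_req pair above: the MMC core may call
 * mmci_pre_request() for request N+1 while request N is still in flight,
 * which lets the dma_map_sg() cost in __mmci_dma_prep_data() overlap with
 * the ongoing transfer. mmci_post_request() then unmaps the buffers and,
 * on error, tears down the prepared descriptor and channel.
 */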
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
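
	/*
	 * For example, blksz = 512 gives blksz_bits = ffs(512) - 1 = 9, so
	 * the classic variants encode 9 << 4 in datactrl, while variants
	 * with blksz_datactrl16 set (e.g. ux500v2) put the raw block size
	 * 512 at bits 16..30 instead.
	 */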
	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			unsigned int clk;

			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/ 0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc = (cmd == host->mrq->sbc);

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
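			/*
			 * For example, a trailing count of 3 takes the
			 * small-read path below: one full 32-bit word is
			 * pulled from the FIFO into a bounce buffer and
			 * only the 3 valid bytes are copied out.
			 */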
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}
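
/*
 * For example, an active-low detect line that reads GPIO value 0 with
 * plat->cd_invert set yields !!0 ^ 1 = 1 above, i.e. "card present".
 */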
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0:
		/* No bus-width supplied. */
		break;
	case 4:
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8:
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default:
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	return;
}
#endif
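
/*
 * Sketch of a device tree node that the parser above would consume; the
 * unit address, GPIO phandle and frequency are illustrative only:
 *
 *	mmc@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cd-gpios = <&gpio6 26 0>;
 *		cd-inverted;
 *	};
 */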
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
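
	/*
	 * For example, with mclk at 100 MHz the minimum bus clock works out
	 * to roughly 100 MHz / 257 ~= 389 kHz on ST variants (maximum
	 * clkdiv of 255, plus 2) and 100 MHz / 512 ~= 195 kHz on ARM
	 * variants (2 * (255 + 1)).
	 */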
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto clk_disable;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
		if (ret)
			dev_warn(&dev->dev, "could not set default pins\n");
	} else
		dev_warn(&dev->dev, "could not get default pinstate\n");

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
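
	/*
	 * For example, datalength_bits = 24 (the ST variants) allows
	 * requests up to 2^24 - 1 bytes (just under 16 MiB), while the
	 * 16-bit ARM variants are limited to 65535 bytes per request.
	 */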
	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				mmci_cd_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
*dev
)
1679 struct mmc_host
*mmc
= amba_get_drvdata(dev
);
1681 amba_set_drvdata(dev
, NULL
);
1684 struct mmci_host
*host
= mmc_priv(mmc
);
1687 * Undo pm_runtime_put() in probe. We use the _sync
1688 * version here so that we can access the primecell.
1690 pm_runtime_get_sync(&dev
->dev
);
1692 mmc_remove_host(mmc
);
1694 writel(0, host
->base
+ MMCIMASK0
);
1695 writel(0, host
->base
+ MMCIMASK1
);
1697 writel(0, host
->base
+ MMCICOMMAND
);
1698 writel(0, host
->base
+ MMCIDATACTRL
);
1700 mmci_dma_release(host
);
1701 free_irq(dev
->irq
[0], host
);
1702 if (!host
->singleirq
)
1703 free_irq(dev
->irq
[1], host
);
1705 if (host
->gpio_wp
!= -ENOSYS
)
1706 gpio_free(host
->gpio_wp
);
1707 if (host
->gpio_cd_irq
>= 0)
1708 free_irq(host
->gpio_cd_irq
, host
);
1709 if (host
->gpio_cd
!= -ENOSYS
)
1710 gpio_free(host
->gpio_cd
);
1712 iounmap(host
->base
);
1713 clk_disable_unprepare(host
->clk
);
1717 amba_release_regions(dev
);
1723 #ifdef CONFIG_SUSPEND
1724 static int mmci_suspend(struct device
*dev
)
1726 struct amba_device
*adev
= to_amba_device(dev
);
1727 struct mmc_host
*mmc
= amba_get_drvdata(adev
);
1731 struct mmci_host
*host
= mmc_priv(mmc
);
1733 ret
= mmc_suspend_host(mmc
);
1735 pm_runtime_get_sync(dev
);
1736 writel(0, host
->base
+ MMCIMASK0
);
1743 static int mmci_resume(struct device
*dev
)
1745 struct amba_device
*adev
= to_amba_device(dev
);
1746 struct mmc_host
*mmc
= amba_get_drvdata(adev
);
1750 struct mmci_host
*host
= mmc_priv(mmc
);
1752 writel(MCI_IRQENABLE
, host
->base
+ MMCIMASK0
);
1753 pm_runtime_put(dev
);
1755 ret
= mmc_resume_host(mmc
);
#ifdef CONFIG_PM_RUNTIME
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
	}

	return 0;
}
#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");