/*
 * bcm2835 sdhost driver.
 *
 * The 2835 has two SD controllers: the Arasan sdhci controller
 * (supported by the iproc driver) and a custom sdhost controller
 * (supported by this driver).
 *
 * The sdhci controller supports both sdcard and sdio.  The sdhost
 * controller supports the sdcard only, but has better performance.
 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
 * the sdhost controller allows the sdhci controller to be used for
 * wifi support.
 *
 * The configuration is done by devicetree via pin muxing.  Both
 * SD controllers are available on the same pins (2 pin groups = pins 22
 * to 27 + pins 48 to 53), so it is possible to use both SD controllers
 * at the same time with different pin groups.  An illustrative devicetree
 * fragment follows this header.
 *
 * Author:      Phil Elwell <phil@raspberrypi.org>
 *              Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
 *
 * Based on
 *  mmc-bcm2835.c by Gellert Weisz
 * which is, in turn, based on
 *  sdhci-bcm2708.c by Broadcom
 *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
 *  sdhci.c and sdhci-pci.c by Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
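/*
 * Illustrative devicetree fragment for the pin-muxing described above.
 * Node names, the pinctrl label and the cell values are assumptions made
 * for the sake of the example; only the compatible string, the "rx-tx"
 * DMA channel name and the need for a register window, clock, interrupt
 * and DMA channel are taken from what this driver actually requests:
 *
 *	sdhost: mmc@7e202000 {
 *		compatible = "brcm,bcm2835-sdhost";
 *		reg = <0x7e202000 0x100>;
 *		interrupts = <2 24>;
 *		clocks = <&clk_core>;
 *		dmas = <&dma 13>;		// DREQ 13, see dma_cfg_* below
 *		dma-names = "rx-tx";
 *		pinctrl-names = "default";
 *		pinctrl-0 = <&sdhost_gpio48>;	// GPIO 48-53 group
 *	};
 */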
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
#include <linux/workqueue.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
#define SDVDD  0x30 /* SD card power control           -  1 R/W */
#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */

#define SDCMD_NEW_FLAG			0x8000
#define SDCMD_FAIL_FLAG			0x4000
#define SDCMD_BUSYWAIT			0x800
#define SDCMD_NO_RESPONSE		0x400
#define SDCMD_LONG_RESPONSE		0x200
#define SDCMD_WRITE_CMD			0x80
#define SDCMD_READ_CMD			0x40
#define SDCMD_CMD_MASK			0x3f

#define SDCDIV_MAX_CDIV			0x7ff
#define SDHSTS_BUSY_IRPT		0x400
#define SDHSTS_BLOCK_IRPT		0x200
#define SDHSTS_SDIO_IRPT		0x100
#define SDHSTS_REW_TIME_OUT		0x80
#define SDHSTS_CMD_TIME_OUT		0x40
#define SDHSTS_CRC16_ERROR		0x20
#define SDHSTS_CRC7_ERROR		0x10
#define SDHSTS_FIFO_ERROR		0x08
/* Reserved */
/* Reserved */
#define SDHSTS_DATA_FLAG		0x01

#define SDHSTS_TRANSFER_ERROR_MASK	(SDHSTS_CRC7_ERROR | \
					 SDHSTS_CRC16_ERROR | \
					 SDHSTS_REW_TIME_OUT | \
					 SDHSTS_FIFO_ERROR)

#define SDHSTS_ERROR_MASK		(SDHSTS_CMD_TIME_OUT | \
					 SDHSTS_TRANSFER_ERROR_MASK)
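/* The interrupt/error bits in SDHSTS are write-one-to-clear; both
 * bcm2835_reset_internal() and the interrupt paths below write the set
 * bits back to SDHSTS to acknowledge them.
 */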
#define SDHCFG_BUSY_IRPT_EN	BIT(10)
#define SDHCFG_BLOCK_IRPT_EN	BIT(8)
#define SDHCFG_SDIO_IRPT_EN	BIT(5)
#define SDHCFG_DATA_IRPT_EN	BIT(4)
#define SDHCFG_SLOW_CARD	BIT(3)
#define SDHCFG_WIDE_EXT_BUS	BIT(2)
#define SDHCFG_WIDE_INT_BUS	BIT(1)
#define SDHCFG_REL_CMD_LINE	BIT(0)

#define SDVDD_POWER_OFF		0
#define SDVDD_POWER_ON		1

#define SDEDM_FORCE_DATA_MODE	BIT(19)
#define SDEDM_CLOCK_PULSE	BIT(20)
#define SDEDM_BYPASS		BIT(21)

#define SDEDM_WRITE_THRESHOLD_SHIFT	9
#define SDEDM_READ_THRESHOLD_SHIFT	14
#define SDEDM_THRESHOLD_MASK		0x1f

#define SDEDM_FSM_MASK		0xf
#define SDEDM_FSM_IDENTMODE	0x0
#define SDEDM_FSM_DATAMODE	0x1
#define SDEDM_FSM_READDATA	0x2
#define SDEDM_FSM_WRITEDATA	0x3
#define SDEDM_FSM_READWAIT	0x4
#define SDEDM_FSM_READCRC	0x5
#define SDEDM_FSM_WRITECRC	0x6
#define SDEDM_FSM_WRITEWAIT1	0x7
#define SDEDM_FSM_POWERDOWN	0x8
#define SDEDM_FSM_POWERUP	0x9
#define SDEDM_FSM_WRITESTART1	0xa
#define SDEDM_FSM_WRITESTART2	0xb
#define SDEDM_FSM_GENPULSES	0xc
#define SDEDM_FSM_WRITEWAIT2	0xd
#define SDEDM_FSM_STARTPOWDOWN	0xf
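/* SDEDM layout as used by this driver (inferred from the accesses below):
 * bits 3:0 hold the FSM state, bits 8:4 the current FIFO word count
 * (read as "(edm >> 4) & 0x1f"), and bits 13:9 / 18:14 the write/read
 * FIFO thresholds programmed in bcm2835_reset_internal().
 */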
#define SDDATA_FIFO_WORDS	16

#define FIFO_READ_THRESHOLD	4
#define FIFO_WRITE_THRESHOLD	4
#define SDDATA_FIFO_PIO_BURST	8

#define PIO_THRESHOLD	1  /* Maximum block count for PIO (0 = always DMA) */
struct bcm2835_host {
	spinlock_t		lock;
	struct mutex		mutex;

	void __iomem		*ioaddr;
	u32			phys_addr;

	struct mmc_host		*mmc;
	struct platform_device	*pdev;

	int			clock;		/* Current clock speed */
	unsigned int		max_clk;	/* Max possible freq */
	struct work_struct	dma_work;
	struct delayed_work	timeout_work;	/* Timer for timeouts */
	struct sg_mapping_iter	sg_miter;	/* SG state for PIO */
	unsigned int		blocks;		/* remaining PIO blocks */
	int			irq;		/* Device IRQ */

	u32			ns_per_fifo_word;

	/* cached registers */
	u32			hcfg;
	u32			cdiv;

	struct mmc_request	*mrq;		/* Current request */
	struct mmc_command	*cmd;		/* Current command */
	struct mmc_data		*data;		/* Current data request */
	bool			data_complete:1;/* Data finished before cmd */
	bool			use_busy:1;	/* Wait for busy interrupt */
	bool			use_sbc:1;	/* Send CMD23 */

	/* for threaded irq handler */
	bool			irq_block;
	bool			irq_busy;
	bool			irq_data;

	/* DMA part */
	struct dma_chan		*dma_chan_rxtx;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_cfg_rx;
	struct dma_slave_config	dma_cfg_tx;
	struct dma_async_tx_descriptor	*dma_desc;
	u32			dma_dir;
	u32			drain_words;
	struct page		*drain_page;
	u32			drain_offset;
	bool			use_dma;
};
static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
			    const char *label)
{
	struct device *dev = &host->pdev->dev;

	if (!cmd)
		return;

	dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
		(cmd == host->cmd) ? '>' : ' ',
		label, cmd->opcode, cmd->arg, cmd->flags,
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
		cmd->error);
}
static void bcm2835_dumpregs(struct bcm2835_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct device *dev = &host->pdev->dev;

	if (mrq) {
		bcm2835_dumpcmd(host, mrq->sbc, "sbc");
		bcm2835_dumpcmd(host, mrq->cmd, "cmd");
		if (mrq->data) {
			dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
				mrq->data->blocks,
				mrq->data->blksz,
				mrq->data->error);
		}
		bcm2835_dumpcmd(host, mrq->stop, "stop");
	}

	dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
	dev_dbg(dev, "SDCMD  0x%08x\n", readl(host->ioaddr + SDCMD));
	dev_dbg(dev, "SDARG  0x%08x\n", readl(host->ioaddr + SDARG));
	dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
	dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
	dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
	dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
	dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
	dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
	dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
	dev_dbg(dev, "SDVDD  0x%08x\n", readl(host->ioaddr + SDVDD));
	dev_dbg(dev, "SDEDM  0x%08x\n", readl(host->ioaddr + SDEDM));
	dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
	dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
	dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
	dev_dbg(dev, "===========================================\n");
}
static void bcm2835_reset_internal(struct bcm2835_host *host)
{
	u32 temp;

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
	writel(0, host->ioaddr + SDCMD);
	writel(0, host->ioaddr + SDARG);
	writel(0xf00000, host->ioaddr + SDTOUT);
	writel(0, host->ioaddr + SDCDIV);
	writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
	writel(0, host->ioaddr + SDHCFG);
	writel(0, host->ioaddr + SDHBCT);
	writel(0, host->ioaddr + SDHBLC);

	/* Limit fifo usage due to silicon bug */
	temp = readl(host->ioaddr + SDEDM);
	temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
		  (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
	temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
		(FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
	writel(temp, host->ioaddr + SDEDM);
	msleep(20);
	writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);
	msleep(20);
	host->clock = 0;
	writel(host->hcfg, host->ioaddr + SDHCFG);
	writel(host->cdiv, host->ioaddr + SDCDIV);
}
static void bcm2835_reset(struct mmc_host *mmc)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	if (host->dma_chan)
		dmaengine_terminate_sync(host->dma_chan);
	bcm2835_reset_internal(host);
}

static void bcm2835_finish_command(struct bcm2835_host *host);
static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
{
	int timediff;
	u32 alternate_idle;

	alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
		SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;

	timediff = 0;

	while (1) {
		u32 edm, fsm;

		edm = readl(host->ioaddr + SDEDM);
		fsm = edm & SDEDM_FSM_MASK;

		if ((fsm == SDEDM_FSM_IDENTMODE) ||
		    (fsm == SDEDM_FSM_DATAMODE))
			break;

		if (fsm == alternate_idle) {
			writel(edm | SDEDM_FORCE_DATA_MODE,
			       host->ioaddr + SDEDM);
			break;
		}

		timediff++;
		if (timediff == 100000) {
			dev_err(&host->pdev->dev,
				"wait_transfer_complete - still waiting after %d retries\n",
				timediff);
			bcm2835_dumpregs(host);
			host->mrq->data->error = -ETIMEDOUT;
			return;
		}
		cpu_relax();
	}
}
static void bcm2835_dma_complete(void *param)
{
	struct bcm2835_host *host = param;

	schedule_work(&host->dma_work);
}
static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
{
	unsigned long flags;
	size_t blksize;
	unsigned long wait_max;

	blksize = host->data->blksz;

	wait_max = jiffies + msecs_to_jiffies(500);

	local_irq_save(flags);

	while (blksize) {
		int copy_words;
		u32 hsts = 0;
		size_t len;
		u32 *buf;

		if (!sg_miter_next(&host->sg_miter)) {
			host->data->error = -EINVAL;
			break;
		}

		len = min(host->sg_miter.length, blksize);
		if (len % 4) {
			host->data->error = -EINVAL;
			break;
		}

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = (u32 *)host->sg_miter.addr;

		copy_words = len / 4;

		while (copy_words) {
			int burst_words, words;
			u32 edm;

			burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
			edm = readl(host->ioaddr + SDEDM);
			if (is_read)
				words = ((edm >> 4) & 0x1f);
			else
				words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);

			if (words < burst_words) {
				int fsm_state = (edm & SDEDM_FSM_MASK);
				struct device *dev = &host->pdev->dev;

				if ((is_read &&
				     (fsm_state != SDEDM_FSM_READDATA &&
				      fsm_state != SDEDM_FSM_READWAIT &&
				      fsm_state != SDEDM_FSM_READCRC)) ||
				    (!is_read &&
				     (fsm_state != SDEDM_FSM_WRITEDATA &&
				      fsm_state != SDEDM_FSM_WRITESTART1 &&
				      fsm_state != SDEDM_FSM_WRITESTART2))) {
					hsts = readl(host->ioaddr + SDHSTS);
					dev_err(dev, "fsm %x, hsts %08x\n",
						fsm_state, hsts);
					if (hsts & SDHSTS_ERROR_MASK)
						break;
				}

				if (time_after(jiffies, wait_max)) {
					dev_err(dev, "PIO %s timeout - EDM %08x\n",
						is_read ? "read" : "write",
						edm);
					hsts = SDHSTS_REW_TIME_OUT;
					break;
				}
				ndelay((burst_words - words) *
				       host->ns_per_fifo_word);
				continue;
			} else if (words > copy_words) {
				words = copy_words;
			}
			copy_words -= words;

			while (words) {
				if (is_read)
					*(buf++) = readl(host->ioaddr + SDDATA);
				else
					writel(*(buf++), host->ioaddr + SDDATA);
				words--;
			}
		}

		if (hsts & SDHSTS_ERROR_MASK)
			break;
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void bcm2835_transfer_pio(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	u32 sdhsts;
	bool is_read;

	is_read = (host->data->flags & MMC_DATA_READ) != 0;
	bcm2835_transfer_block_pio(host, is_read);

	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & (SDHSTS_CRC16_ERROR |
		      SDHSTS_CRC7_ERROR |
		      SDHSTS_FIFO_ERROR)) {
		dev_err(dev, "%s transfer error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -EILSEQ;
	} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
			      SDHSTS_REW_TIME_OUT))) {
		dev_err(dev, "%s timeout error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -ETIMEDOUT;
	}
}
static
void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
{
	int len, dir_data, dir_slave;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	dma_chan = host->dma_chan_rxtx;
	if (data->flags & MMC_DATA_READ) {
		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	} else {
		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	}

	/* The block doesn't manage the FIFO DREQs properly for
	 * multi-block transfers, so don't attempt to DMA the final
	 * few words.  Unfortunately this requires the final sg entry
	 * to be trimmed.  N.B. This code demands that the overspill
	 * is contained in a single sg entry.
	 */
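	/* With FIFO_READ_THRESHOLD == 4 this trims at most
	 * (4 - 1) * 4 = 12 bytes (three FIFO words) from the final sg
	 * entry; they are read out by PIO in bcm2835_dma_complete_work()
	 * once the DMA transfer has finished.
	 */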

	host->drain_words = 0;
	if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
		struct scatterlist *sg;
		u32 len;
		int i;

		len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
			  (u32)data->blocks * data->blksz);

		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg_is_last(sg)) {
				WARN_ON(sg->length < len);
				sg->length -= len;
				host->drain_page = sg_page(sg);
				host->drain_offset = sg->offset + sg->length;
			}
		}
		host->drain_words = len / 4;
	}

	/* The parameters have already been validated, so this will not fail */
	(void)dmaengine_slave_config(dma_chan,
				     (dir_data == DMA_FROM_DEVICE) ?
				     &host->dma_cfg_rx :
				     &host->dma_cfg_tx);

	len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
			 dir_data);

	if (len > 0) {
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = bcm2835_dma_complete;
		desc->callback_param = host;
		host->dma_desc = desc;
		host->dma_chan = dma_chan;
		host->dma_dir = dir_data;
	}
}
static void bcm2835_start_dma(struct bcm2835_host *host)
{
	dmaengine_submit(host->dma_desc);
	dma_async_issue_pending(host->dma_chan);
}
static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
{
	u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
		       SDHCFG_BUSY_IRPT_EN;

	if (host->dma_desc) {
		host->hcfg = (host->hcfg & ~all_irqs) |
			     SDHCFG_BUSY_IRPT_EN;
	} else {
		host->hcfg = (host->hcfg & ~all_irqs) |
			     SDHCFG_DATA_IRPT_EN |
			     SDHCFG_BUSY_IRPT_EN;
	}

	writel(host->hcfg, host->ioaddr + SDHCFG);
}
static
void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	WARN_ON(host->data);

	host->data = data;
	if (!data)
		return;

	host->data_complete = false;
	host->data->bytes_xfered = 0;

	if (!host->dma_desc) {
		/* Use PIO */
		int flags = SG_MITER_ATOMIC;

		if (data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	bcm2835_set_transfer_irqs(host);

	writel(data->blksz, host->ioaddr + SDHBCT);
	writel(data->blocks, host->ioaddr + SDHBLC);
}
static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
{
	struct device *dev = &host->pdev->dev;
	u32 value;
	int ret;

	ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
				 !(value & SDCMD_NEW_FLAG), 1, 10);
	if (ret == -ETIMEDOUT)
		/* if it takes a while make poll interval bigger */
		ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
					 !(value & SDCMD_NEW_FLAG),
					 10, max_ms * 1000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);

	return value;
}
static void bcm2835_finish_request(struct bcm2835_host *host)
{
	struct dma_chan *terminate_chan = NULL;
	struct mmc_request *mrq;

	cancel_delayed_work(&host->timeout_work);

	mrq = host->mrq;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	host->dma_desc = NULL;
	terminate_chan = host->dma_chan;
	host->dma_chan = NULL;

	if (terminate_chan) {
		int err = dmaengine_terminate_all(terminate_chan);

		if (err)
			dev_err(&host->pdev->dev,
				"failed to terminate DMA (%d)\n", err);
	}

	mmc_request_done(host->mmc, mrq);
}
static
bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd)
{
	struct device *dev = &host->pdev->dev;
	u32 sdcmd, sdhsts;
	unsigned long timeout;

	WARN_ON(host->cmd);

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "previous command never completed.\n");
		bcm2835_dumpregs(host);
		cmd->error = -EILSEQ;
		bcm2835_finish_request(host);
		return false;
	}

	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout = 10 * HZ;
	schedule_delayed_work(&host->timeout_work, timeout);

	host->cmd = cmd;

	/* Clear any error flags */
	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & SDHSTS_ERROR_MASK)
		writel(sdhsts, host->ioaddr + SDHSTS);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		dev_err(dev, "unsupported response type!\n");
		cmd->error = -EINVAL;
		bcm2835_finish_request(host);
		return false;
	}

	bcm2835_prepare_data(host, cmd);

	writel(cmd->arg, host->ioaddr + SDARG);

	sdcmd = cmd->opcode & SDCMD_CMD_MASK;

	host->use_busy = false;
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		sdcmd |= SDCMD_NO_RESPONSE;
	} else {
		if (cmd->flags & MMC_RSP_136)
			sdcmd |= SDCMD_LONG_RESPONSE;
		if (cmd->flags & MMC_RSP_BUSY) {
			sdcmd |= SDCMD_BUSYWAIT;
			host->use_busy = true;
		}
	}

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_WRITE)
			sdcmd |= SDCMD_WRITE_CMD;
		if (cmd->data->flags & MMC_DATA_READ)
			sdcmd |= SDCMD_READ_CMD;
	}

	writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);

	return true;
}
static void bcm2835_transfer_complete(struct bcm2835_host *host)
{
	struct mmc_data *data;

	WARN_ON(!host->data_complete);

	data = host->data;
	host->data = NULL;

	/* Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (host->mrq->stop && (data->error || !host->use_sbc)) {
		if (bcm2835_send_command(host, host->mrq->stop)) {
			/* No busy, so poll for completion */
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else {
		bcm2835_wait_transfer_complete(host);
		bcm2835_finish_request(host);
	}
}
static void bcm2835_finish_data(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_data *data;

	data = host->data;

	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);

	data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);

	host->data_complete = true;

	if (host->cmd) {
		/* Data managed to finish before the
		 * command completed. Make sure we do
		 * things in the proper order.
		 */
		dev_dbg(dev, "Finished early - HSTS %08x\n",
			readl(host->ioaddr + SDHSTS));
	} else {
		bcm2835_transfer_complete(host);
	}
}
static void bcm2835_finish_command(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_command *cmd = host->cmd;
	u32 sdcmd;

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);

	/* Check for errors */
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "command never completed.\n");
		bcm2835_dumpregs(host);
		host->cmd->error = -EIO;
		bcm2835_finish_request(host);
		return;
	} else if (sdcmd & SDCMD_FAIL_FLAG) {
		u32 sdhsts = readl(host->ioaddr + SDHSTS);

		/* Clear the errors */
		writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);

		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
				host->cmd->error = -ETIMEDOUT;
			} else {
				dev_err(dev, "unexpected command %d error\n",
					host->cmd->opcode);
				bcm2835_dumpregs(host);
				host->cmd->error = -EILSEQ;
			}
			bcm2835_finish_request(host);
			return;
		}
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			int i;

			for (i = 0; i < 4; i++) {
				cmd->resp[3 - i] =
					readl(host->ioaddr + SDRSP0 + i * 4);
			}
		} else {
			cmd->resp[0] = readl(host->ioaddr + SDRSP0);
		}
	}

	if (cmd == host->mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		host->cmd = NULL;
		if (bcm2835_send_command(host, host->mrq->cmd)) {
			if (host->data && host->dma_desc)
				/* DMA transfer starts now, PIO starts
				 * after irq
				 */
				bcm2835_start_dma(host);

			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (cmd == host->mrq->stop) {
		/* Finished CMD12 */
		bcm2835_finish_request(host);
	} else {
		/* Processed actual command. */
		host->cmd = NULL;
		if (!host->data)
			bcm2835_finish_request(host);
		else if (host->data_complete)
			bcm2835_transfer_complete(host);
	}
}
static void bcm2835_timeout(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct bcm2835_host *host =
		container_of(d, struct bcm2835_host, timeout_work);
	struct device *dev = &host->pdev->dev;

	mutex_lock(&host->mutex);

	if (host->mrq) {
		dev_err(dev, "timeout waiting for hardware interrupt.\n");
		bcm2835_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			bcm2835_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			bcm2835_finish_request(host);
		}
	}

	mutex_unlock(&host->mutex);
}
static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
{
	struct device *dev = &host->pdev->dev;

	if (!(intmask & SDHSTS_ERROR_MASK))
		return false;

	if (!host->cmd)
		return true;

	dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
	if (intmask & SDHSTS_CRC7_ERROR) {
		host->cmd->error = -EILSEQ;
	} else if (intmask & (SDHSTS_CRC16_ERROR |
			      SDHSTS_FIFO_ERROR)) {
		if (host->mrq->data)
			host->mrq->data->error = -EILSEQ;
		else
			host->cmd->error = -EILSEQ;
	} else if (intmask & SDHSTS_REW_TIME_OUT) {
		if (host->mrq->data)
			host->mrq->data->error = -ETIMEDOUT;
		else
			host->cmd->error = -ETIMEDOUT;
	} else if (intmask & SDHSTS_CMD_TIME_OUT) {
		host->cmd->error = -ETIMEDOUT;
	}
	bcm2835_dumpregs(host);

	return true;
}
static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
{
	if (!host->data)
		return;
	if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
		host->data->error = -EILSEQ;
	if (intmask & SDHSTS_REW_TIME_OUT)
		host->data->error = -ETIMEDOUT;
}
static void bcm2835_busy_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->cmd)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (WARN_ON(!host->use_busy)) {
		bcm2835_dumpregs(host);
		return;
	}
	host->use_busy = false;

	bcm2835_finish_command(host);
}
static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
{
	/* There are no dedicated data/space available interrupt
	 * status bits, so it is necessary to use the single shared
	 * data/space available FIFO status bits. It is therefore not
	 * an error to get here when there is no data transfer in
	 * progress.
	 */
	if (!host->data)
		return;

	bcm2835_check_data_error(host, intmask);
	if (host->data->error)
		goto finished;

	if (host->data->flags & MMC_DATA_WRITE) {
		/* Use the block interrupt for writes after the first block */
		host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
		host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
		writel(host->hcfg, host->ioaddr + SDHCFG);
		bcm2835_transfer_pio(host);
	} else {
		bcm2835_transfer_pio(host);
		host->blocks--;
		if ((host->blocks == 0) || host->data->error)
			goto finished;
	}
	return;

finished:
	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);
}
static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
{
	if (!host->data)
		return;
	if ((host->blocks == 0) || host->data->error)
		bcm2835_finish_data(host);
}
static void bcm2835_block_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->data)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (!host->dma_desc) {
		WARN_ON(!host->blocks);
		if (host->data->error || (--host->blocks == 0))
			bcm2835_finish_data(host);
		else
			bcm2835_transfer_pio(host);
	} else if (host->data->flags & MMC_DATA_WRITE) {
		bcm2835_finish_data(host);
	}
}
static irqreturn_t bcm2835_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct bcm2835_host *host = dev_id;
	u32 intmask;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHSTS);

	writel(SDHSTS_BUSY_IRPT |
	       SDHSTS_BLOCK_IRPT |
	       SDHSTS_SDIO_IRPT |
	       SDHSTS_DATA_FLAG,
	       host->ioaddr + SDHSTS);

	if (intmask & SDHSTS_BLOCK_IRPT) {
		bcm2835_check_data_error(host, intmask);
		host->irq_block = true;
		result = IRQ_WAKE_THREAD;
	}

	if (intmask & SDHSTS_BUSY_IRPT) {
		if (!bcm2835_check_cmd_error(host, intmask)) {
			host->irq_busy = true;
			result = IRQ_WAKE_THREAD;
		} else {
			result = IRQ_HANDLED;
		}
	}

	/* There is no true data interrupt status bit, so it is
	 * necessary to qualify the data flag with the interrupt
	 * enable bit.
	 */
	if ((intmask & SDHSTS_DATA_FLAG) &&
	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
		bcm2835_data_irq(host, intmask);
		host->irq_data = true;
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&host->lock);

	return result;
}
static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
{
	struct bcm2835_host *host = dev_id;
	unsigned long flags;
	bool block, busy, data;

	spin_lock_irqsave(&host->lock, flags);

	block = host->irq_block;
	busy = host->irq_busy;
	data = host->irq_data;
	host->irq_block = false;
	host->irq_busy = false;
	host->irq_data = false;

	spin_unlock_irqrestore(&host->lock, flags);

	mutex_lock(&host->mutex);

	if (block)
		bcm2835_block_irq(host);
	if (busy)
		bcm2835_busy_irq(host);
	if (data)
		bcm2835_data_threaded_irq(host);

	mutex_unlock(&host->mutex);

	return IRQ_HANDLED;
}
static void bcm2835_dma_complete_work(struct work_struct *work)
{
	struct bcm2835_host *host =
		container_of(work, struct bcm2835_host, dma_work);
	struct mmc_data *data = host->data;

	mutex_lock(&host->mutex);

	if (host->dma_chan) {
		dma_unmap_sg(host->dma_chan->device->dev,
			     data->sg, data->sg_len,
			     host->dma_dir);

		host->dma_chan = NULL;
	}

	if (host->drain_words) {
		unsigned long flags;
		void *page;
		u32 *buf;

		if (host->drain_offset & PAGE_MASK) {
			host->drain_page += host->drain_offset >> PAGE_SHIFT;
			host->drain_offset &= ~PAGE_MASK;
		}
		local_irq_save(flags);
		page = kmap_atomic(host->drain_page);
		buf = page + host->drain_offset;

		while (host->drain_words) {
			u32 edm = readl(host->ioaddr + SDEDM);

			if ((edm >> 4) & 0x1f)
				*(buf++) = readl(host->ioaddr + SDDATA);
			host->drain_words--;
		}

		kunmap_atomic(page);
		local_irq_restore(flags);
	}

	bcm2835_finish_data(host);

	mutex_unlock(&host->mutex);
}
static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
{
	int div;

	/* The SDCDIV register has 11 bits, and holds (div - 2).  But
	 * in data mode the max is 50MHz without a minimum, and only
	 * the bottom 3 bits are used. Since the switch over is
	 * automatic (unless we have marked the card as slow...),
	 * chosen values have to make sense in both modes.  Ident mode
	 * must be 100-400KHz, so we can range-check the requested
	 * clock.  CMD15 must be used to return to data mode, so this
	 * can be monitored.
	 *
	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
	 *                 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
	 *
	 *		 623->400KHz/27.8MHz
	 *		 reset value (507)->491159/50MHz
	 *
	 * BUT, the 3-bit clock divisor in data mode is too small if
	 * the core clock is higher than 250MHz, so instead use the
	 * SLOW_CARD configuration bit to force the use of the ident
	 * clock divisor at all times.
	 */
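	/* Worked example, assuming a 250 MHz core clock and a requested
	 * 50 MHz card clock: div = 250000000 / 50000000 = 5; 250 MHz / 5
	 * does not exceed the request, so div stays 5 and becomes 3 after
	 * the "div -= 2", giving an actual clock of 250 MHz / (3 + 2)
	 * = 50 MHz.
	 */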

	if (clock < 100000) {
		/* Can't stop the clock, but make it as slow as possible
		 * to show willing
		 */
		host->cdiv = SDCDIV_MAX_CDIV;
		writel(host->cdiv, host->ioaddr + SDCDIV);
		return;
	}

	div = host->max_clk / clock;
	if (div < 2)
		div = 2;
	if ((host->max_clk / div) > clock)
		div++;
	div -= 2;

	if (div > SDCDIV_MAX_CDIV)
		div = SDCDIV_MAX_CDIV;

	clock = host->max_clk / (div + 2);
	host->mmc->actual_clock = clock;

	/* Calibrate some delays */

	host->ns_per_fifo_word = (1000000000 / clock) *
		((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);

	host->cdiv = div;
	writel(host->cdiv, host->ioaddr + SDCDIV);

	/* Set the timeout to 500ms */
	writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT);
}
static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct bcm2835_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	u32 edm, fsm;

	/* Reset the error statuses in case this is a retry */
	if (mrq->sbc)
		mrq->sbc->error = 0;
	if (mrq->cmd)
		mrq->cmd->error = 0;
	if (mrq->data)
		mrq->data->error = 0;
	if (mrq->stop)
		mrq->stop->error = 0;

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(dev, "unsupported block size (%d bytes)\n",
			mrq->data->blksz);

		if (mrq->cmd)
			mrq->cmd->error = -EINVAL;

		mmc_request_done(mmc, mrq);
		return;
	}

	if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
		bcm2835_prepare_dma(host, mrq->data);

	mutex_lock(&host->mutex);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	edm = readl(host->ioaddr + SDEDM);
	fsm = edm & SDEDM_FSM_MASK;

	if ((fsm != SDEDM_FSM_IDENTMODE) &&
	    (fsm != SDEDM_FSM_DATAMODE)) {
		dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
			edm);
		bcm2835_dumpregs(host);

		if (mrq->cmd)
			mrq->cmd->error = -EILSEQ;

		bcm2835_finish_request(host);
		mutex_unlock(&host->mutex);
		return;
	}

	host->use_sbc = !!mrq->sbc && host->mrq->data &&
			(host->mrq->data->flags & MMC_DATA_READ);
	if (host->use_sbc) {
		if (bcm2835_send_command(host, mrq->sbc)) {
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
		if (host->data && host->dma_desc) {
			/* DMA transfer starts now, PIO starts after irq */
			bcm2835_start_dma(host);
		}

		if (!host->use_busy)
			bcm2835_finish_command(host);
	}

	mutex_unlock(&host->mutex);
}
static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	mutex_lock(&host->mutex);

	if (!ios->clock || ios->clock != host->clock) {
		bcm2835_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	/* set bus width */
	host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->hcfg |= SDHCFG_WIDE_EXT_BUS;

	host->hcfg |= SDHCFG_WIDE_INT_BUS;

	/* Disable clever clock switching, to cope with fast core clocks */
	host->hcfg |= SDHCFG_SLOW_CARD;

	writel(host->hcfg, host->ioaddr + SDHCFG);

	mutex_unlock(&host->mutex);
}
static const struct mmc_host_ops bcm2835_ops = {
	.request = bcm2835_request,
	.set_ios = bcm2835_set_ios,
	.hw_reset = bcm2835_reset,
};
static int bcm2835_add_host(struct bcm2835_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = &host->pdev->dev;
	char pio_limit_string[20];
	int ret;

	if (!mmc->f_max || mmc->f_max > host->max_clk)
		mmc->f_max = host->max_clk;
	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;

	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
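	/* E.g. with f_max = 250000000 (250 MHz core clock) this evaluates
	 * to 0xffffffff / 250000 = 17179 ms of maximum busy wait.
	 */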

	dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
		mmc->f_max, mmc->f_min, mmc->max_busy_timeout);

	/* host controller capabilities */
	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		     MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_ERASE |
		     MMC_CAP_CMD23;

	spin_lock_init(&host->lock);
	mutex_init(&host->mutex);

	if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
		dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
		host->use_dma = false;
	} else {
		host->use_dma = true;

		host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.slave_id = 13;		/* DREQ channel */
		host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
		host->dma_cfg_tx.src_addr = 0;
		host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;

		host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.slave_id = 13;		/* DREQ channel */
		host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
		host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
		host->dma_cfg_rx.dst_addr = 0;

		if (dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_tx) != 0 ||
		    dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_rx) != 0)
			host->use_dma = false;
	}

	mmc->max_segs = 128;
	mmc->max_req_size = 524288;
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_blk_size = 1024;
	mmc->max_blk_count = 65535;

	/* report supported voltage ranges */
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
	INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);

	/* Set interrupt enables */
	host->hcfg = SDHCFG_BUSY_IRPT_EN;

	bcm2835_reset_internal(host);

	ret = request_threaded_irq(host->irq, bcm2835_irq,
				   bcm2835_threaded_irq,
				   0, mmc_hostname(mmc), host);
	if (ret) {
		dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
		return ret;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		free_irq(host->irq, host);
		return ret;
	}

	pio_limit_string[0] = '\0';
	if (host->use_dma && (PIO_THRESHOLD > 0))
		sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
	dev_info(dev, "loaded - DMA %s%s\n",
		 host->use_dma ? "enabled" : "disabled", pio_limit_string);

	return 0;
}
static int bcm2835_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct resource *iomem;
	struct bcm2835_host *host;
	struct mmc_host *mmc;
	const __be32 *regaddr_p;
	int ret;

	dev_dbg(dev, "%s\n", __func__);
	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &bcm2835_ops;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err;
	}

	/* Parse OF address directly to get the physical address for
	 * DMA to our registers.
	 */
	regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
	if (!regaddr_p) {
		dev_err(dev, "Can't get phys address\n");
		ret = -EINVAL;
		goto err;
	}

	host->phys_addr = be32_to_cpup(regaddr_p);

	host->dma_chan = NULL;
	host->dma_desc = NULL;

	host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "could not get clk: %d\n", ret);
		goto err;
	}

	host->max_clk = clk_get_rate(clk);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(dev, "get IRQ failed\n");
		ret = -EINVAL;
		goto err;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err;

	ret = bcm2835_add_host(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	dev_dbg(dev, "%s -> OK\n", __func__);

	return 0;

err:
	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
	mmc_free_host(mmc);

	return ret;
}
static int bcm2835_remove(struct platform_device *pdev)
{
	struct bcm2835_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);

	free_irq(host->irq, host);

	cancel_work_sync(&host->dma_work);
	cancel_delayed_work_sync(&host->timeout_work);

	mmc_free_host(host->mmc);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static const struct of_device_id bcm2835_match[] = {
	{ .compatible = "brcm,bcm2835-sdhost" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm2835_match);

static struct platform_driver bcm2835_driver = {
	.probe      = bcm2835_probe,
	.remove     = bcm2835_remove,
	.driver     = {
		.name		= "sdhost-bcm2835",
		.of_match_table	= bcm2835_match,
	},
};
module_platform_driver(bcm2835_driver);
MODULE_ALIAS("platform:sdhost-bcm2835");
MODULE_DESCRIPTION("BCM2835 SDHost driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Phil Elwell");