/*
 * bcm2835 sdhost driver.
 *
 * The 2835 has two SD controllers: the Arasan sdhci controller
 * (supported by the iproc driver) and a custom sdhost controller
 * (supported by this driver).
 *
 * The sdhci controller supports both sdcard and sdio. The sdhost
 * controller supports the sdcard only, but has better performance.
 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
 * the sdhost controller frees the sdhci controller for wifi support.
 *
 * The configuration is done by devicetree via pin muxing. Both
 * SD controllers are available on the same pins (2 pin groups = pin 22
 * to 27 + pin 48 to 53), so it is possible to use both SD controllers
 * at the same time with different pin groups.
 *
 * Author:      Phil Elwell <phil@raspberrypi.org>
 *              Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
 *
 * Based on
 *  mmc-bcm2835.c by Gellert Weisz
 * which is, in turn, based on
 *  sdhci-bcm2708.c by Broadcom
 *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
 *  sdhci.c and sdhci-pci.c by Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
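
/*
 * For reference, a minimal devicetree node for this controller might look
 * like the illustrative sketch below. The register address, interrupt
 * specifier, clock and DMA phandles are placeholders and depend on the SoC
 * integration; only the compatible string, the DREQ line (13) and the
 * "rx-tx" dma-names value are taken from this driver.
 *
 *	sdhost: mmc@7e202000 {
 *		compatible = "brcm,bcm2835-sdhost";
 *		reg = <0x7e202000 0x100>;
 *		interrupts = <2 24>;
 *		clocks = <&clk_core>;
 *		dmas = <&dma 13>;
 *		dma-names = "rx-tx";
 *	};
 */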
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
#include <linux/workqueue.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
#define SDVDD  0x30 /* SD card power control           -  1 R/W */
#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */

#define SDCMD_NEW_FLAG		0x8000
#define SDCMD_FAIL_FLAG		0x4000
#define SDCMD_BUSYWAIT		0x800
#define SDCMD_NO_RESPONSE	0x400
#define SDCMD_LONG_RESPONSE	0x200
#define SDCMD_WRITE_CMD		0x80
#define SDCMD_READ_CMD		0x40
#define SDCMD_CMD_MASK		0x3f

#define SDCDIV_MAX_CDIV		0x7ff

#define SDHSTS_BUSY_IRPT	0x400
#define SDHSTS_BLOCK_IRPT	0x200
#define SDHSTS_SDIO_IRPT	0x100
#define SDHSTS_REW_TIME_OUT	0x80
#define SDHSTS_CMD_TIME_OUT	0x40
#define SDHSTS_CRC16_ERROR	0x20
#define SDHSTS_CRC7_ERROR	0x10
#define SDHSTS_FIFO_ERROR	0x08
#define SDHSTS_DATA_FLAG	0x01

#define SDHSTS_TRANSFER_ERROR_MASK	(SDHSTS_CRC7_ERROR | \
					 SDHSTS_CRC16_ERROR | \
					 SDHSTS_REW_TIME_OUT | \
					 SDHSTS_FIFO_ERROR)

#define SDHSTS_ERROR_MASK		(SDHSTS_CMD_TIME_OUT | \
					 SDHSTS_TRANSFER_ERROR_MASK)

#define SDHCFG_BUSY_IRPT_EN	BIT(10)
#define SDHCFG_BLOCK_IRPT_EN	BIT(8)
#define SDHCFG_SDIO_IRPT_EN	BIT(5)
#define SDHCFG_DATA_IRPT_EN	BIT(4)
#define SDHCFG_SLOW_CARD	BIT(3)
#define SDHCFG_WIDE_EXT_BUS	BIT(2)
#define SDHCFG_WIDE_INT_BUS	BIT(1)
#define SDHCFG_REL_CMD_LINE	BIT(0)

#define SDVDD_POWER_OFF		0
#define SDVDD_POWER_ON		1

#define SDEDM_FORCE_DATA_MODE	BIT(19)
#define SDEDM_CLOCK_PULSE	BIT(20)
#define SDEDM_BYPASS		BIT(21)

#define SDEDM_WRITE_THRESHOLD_SHIFT	9
#define SDEDM_READ_THRESHOLD_SHIFT	14
#define SDEDM_THRESHOLD_MASK		0x1f

#define SDEDM_FSM_MASK		0xf
#define SDEDM_FSM_IDENTMODE	0x0
#define SDEDM_FSM_DATAMODE	0x1
#define SDEDM_FSM_READDATA	0x2
#define SDEDM_FSM_WRITEDATA	0x3
#define SDEDM_FSM_READWAIT	0x4
#define SDEDM_FSM_READCRC	0x5
#define SDEDM_FSM_WRITECRC	0x6
#define SDEDM_FSM_WRITEWAIT1	0x7
#define SDEDM_FSM_POWERDOWN	0x8
#define SDEDM_FSM_POWERUP	0x9
#define SDEDM_FSM_WRITESTART1	0xa
#define SDEDM_FSM_WRITESTART2	0xb
#define SDEDM_FSM_GENPULSES	0xc
#define SDEDM_FSM_WRITEWAIT2	0xd
#define SDEDM_FSM_STARTPOWDOWN	0xf

#define SDDATA_FIFO_WORDS	16

#define FIFO_READ_THRESHOLD	4
#define FIFO_WRITE_THRESHOLD	4
#define SDDATA_FIFO_PIO_BURST	8

#define PIO_THRESHOLD	1  /* Maximum block count for PIO (0 = always DMA) */
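
/*
 * Note on PIO_THRESHOLD (summary of how it is used later in this file):
 * bcm2835_request() only prepares a DMA descriptor when a data transfer
 * spans more than PIO_THRESHOLD blocks and a DMA channel is available, so
 * with the value 1 above single-block transfers are done by PIO and
 * multi-block transfers by DMA.
 */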

struct bcm2835_host {
	spinlock_t		lock;
	struct mutex		mutex;

	void __iomem		*ioaddr;
	u32			phys_addr;

	struct mmc_host		*mmc;
	struct platform_device	*pdev;

	int			clock;		/* Current clock speed */
	unsigned int		max_clk;	/* Max possible freq */
	struct work_struct	dma_work;
	struct delayed_work	timeout_work;	/* Timer for timeouts */
	struct sg_mapping_iter	sg_miter;	/* SG state for PIO */
	unsigned int		blocks;		/* remaining PIO blocks */
	int			irq;		/* Device IRQ */

	u32			ns_per_fifo_word;

	/* cached registers */
	u32			hcfg;
	u32			cdiv;

	struct mmc_request	*mrq;		/* Current request */
	struct mmc_command	*cmd;		/* Current command */
	struct mmc_data		*data;		/* Current data request */
	bool			data_complete:1;/* Data finished before cmd */
	bool			use_busy:1;	/* Wait for busy interrupt */
	bool			use_sbc:1;	/* Send CMD23 */

	/* for threaded irq handler */
	bool			irq_block;
	bool			irq_busy;
	bool			irq_data;

	struct dma_chan		*dma_chan_rxtx;
	struct dma_chan		*dma_chan;
	struct dma_slave_config	dma_cfg_rx;
	struct dma_slave_config	dma_cfg_tx;
	struct dma_async_tx_descriptor	*dma_desc;
	u32			dma_dir;
	u32			drain_words;
	struct page		*drain_page;
	u32			drain_offset;
	bool			use_dma;
};

static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
			    const char *label)
{
	struct device *dev = &host->pdev->dev;

	if (!cmd)
		return;

	dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
		(cmd == host->cmd) ? '>' : ' ',
		label, cmd->opcode, cmd->arg, cmd->flags,
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
		cmd->error);
}

static void bcm2835_dumpregs(struct bcm2835_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct device *dev = &host->pdev->dev;

	if (mrq) {
		bcm2835_dumpcmd(host, mrq->sbc, "sbc");
		bcm2835_dumpcmd(host, mrq->cmd, "cmd");
		if (mrq->data)
			dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
				mrq->data->blocks,
				mrq->data->blksz,
				mrq->data->error);
		bcm2835_dumpcmd(host, mrq->stop, "stop");
	}

	dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
	dev_dbg(dev, "SDCMD  0x%08x\n", readl(host->ioaddr + SDCMD));
	dev_dbg(dev, "SDARG  0x%08x\n", readl(host->ioaddr + SDARG));
	dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
	dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
	dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
	dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
	dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
	dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
	dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
	dev_dbg(dev, "SDVDD  0x%08x\n", readl(host->ioaddr + SDVDD));
	dev_dbg(dev, "SDEDM  0x%08x\n", readl(host->ioaddr + SDEDM));
	dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
	dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
	dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
	dev_dbg(dev, "===========================================\n");
}

static void bcm2835_reset_internal(struct bcm2835_host *host)
{
	u32 temp;

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
	writel(0, host->ioaddr + SDCMD);
	writel(0, host->ioaddr + SDARG);
	writel(0xf00000, host->ioaddr + SDTOUT);
	writel(0, host->ioaddr + SDCDIV);
	writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
	writel(0, host->ioaddr + SDHCFG);
	writel(0, host->ioaddr + SDHBCT);
	writel(0, host->ioaddr + SDHBLC);

	/* Limit fifo usage due to silicon bug */
	temp = readl(host->ioaddr + SDEDM);
	temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
		  (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
	temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
		(FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
	writel(temp, host->ioaddr + SDEDM);

	writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);

	writel(host->hcfg, host->ioaddr + SDHCFG);
	writel(host->cdiv, host->ioaddr + SDCDIV);
}

static void bcm2835_reset(struct mmc_host *mmc)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	if (host->dma_chan)
		dmaengine_terminate_sync(host->dma_chan);
	host->dma_chan = NULL;
	bcm2835_reset_internal(host);
}

static void bcm2835_finish_command(struct bcm2835_host *host);

static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
{
	int timediff;
	u32 alternate_idle;

	alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
		SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;

	timediff = 0;

	while (1) {
		u32 edm, fsm;

		edm = readl(host->ioaddr + SDEDM);
		fsm = edm & SDEDM_FSM_MASK;

		if ((fsm == SDEDM_FSM_IDENTMODE) ||
		    (fsm == SDEDM_FSM_DATAMODE))
			break;
		if (fsm == alternate_idle) {
			writel(edm | SDEDM_FORCE_DATA_MODE,
			       host->ioaddr + SDEDM);
			break;
		}

		timediff++;
		if (timediff == 100000) {
			dev_err(&host->pdev->dev,
				"wait_transfer_complete - still waiting after %d retries\n",
				timediff);
			bcm2835_dumpregs(host);
			host->mrq->data->error = -ETIMEDOUT;
			return;
		}
	}
}

static void bcm2835_dma_complete(void *param)
{
	struct bcm2835_host *host = param;

	schedule_work(&host->dma_work);
}

static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
{
	size_t blksize;
	unsigned long wait_max;
	unsigned long flags;

	blksize = host->data->blksz;

	wait_max = jiffies + msecs_to_jiffies(500);

	local_irq_save(flags);

	while (blksize) {
		int copy_words;
		u32 hsts = 0;
		size_t len;
		u32 *buf;

		if (!sg_miter_next(&host->sg_miter)) {
			host->data->error = -EINVAL;
			break;
		}

		len = min(host->sg_miter.length, blksize);
		if (len % 4) {
			host->data->error = -EINVAL;
			break;
		}

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = (u32 *)host->sg_miter.addr;

		copy_words = len / 4;

		while (copy_words) {
			int burst_words, words;
			u32 edm;

			burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
			edm = readl(host->ioaddr + SDEDM);
			if (is_read)
				words = ((edm >> 4) & 0x1f);
			else
				words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);

			if (words < burst_words) {
				int fsm_state = (edm & SDEDM_FSM_MASK);
				struct device *dev = &host->pdev->dev;

				if ((is_read &&
				     (fsm_state != SDEDM_FSM_READDATA &&
				      fsm_state != SDEDM_FSM_READWAIT &&
				      fsm_state != SDEDM_FSM_READCRC)) ||
				    (!is_read &&
				     (fsm_state != SDEDM_FSM_WRITEDATA &&
				      fsm_state != SDEDM_FSM_WRITESTART1 &&
				      fsm_state != SDEDM_FSM_WRITESTART2))) {
					hsts = readl(host->ioaddr + SDHSTS);
					dev_err(dev, "fsm %x, hsts %08x\n",
						fsm_state, hsts);
					if (hsts & SDHSTS_ERROR_MASK)
						break;
				}

				if (time_after(jiffies, wait_max)) {
					dev_err(dev, "PIO %s timeout - EDM %08x\n",
						is_read ? "read" : "write",
						edm);
					hsts = SDHSTS_REW_TIME_OUT;
					break;
				}
				ndelay((burst_words - words) *
				       host->ns_per_fifo_word);
				continue;
			} else if (words > copy_words) {
				words = copy_words;
			}

			copy_words -= words;

			while (words) {
				if (is_read)
					*(buf++) = readl(host->ioaddr + SDDATA);
				else
					writel(*(buf++), host->ioaddr + SDDATA);
				words--;
			}
		}

		if (hsts & SDHSTS_ERROR_MASK)
			break;
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
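
/*
 * Note on the PIO loop above: bits [8:4] of SDEDM are read as the current
 * FIFO fill level ((edm >> 4) & 0x1f), so for reads "words" is the number
 * of words already available in the FIFO and for writes it is the free
 * space (SDDATA_FIFO_WORDS minus the fill level). This is an observation
 * about how the code uses the register, not a statement taken from the
 * hardware documentation.
 */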

static void bcm2835_transfer_pio(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	u32 sdhsts;
	bool is_read;

	is_read = (host->data->flags & MMC_DATA_READ) != 0;
	bcm2835_transfer_block_pio(host, is_read);

	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & (SDHSTS_CRC16_ERROR |
		      SDHSTS_CRC7_ERROR |
		      SDHSTS_FIFO_ERROR)) {
		dev_err(dev, "%s transfer error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -EILSEQ;
	} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
			      SDHSTS_REW_TIME_OUT))) {
		dev_err(dev, "%s timeout error - HSTS %08x\n",
			is_read ? "read" : "write", sdhsts);
		host->data->error = -ETIMEDOUT;
	}
}

static void bcm2835_prepare_dma(struct bcm2835_host *host,
				struct mmc_data *data)
{
	int len, dir_data, dir_slave;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *dma_chan;

	dma_chan = host->dma_chan_rxtx;
	if (data->flags & MMC_DATA_READ) {
		dir_data = DMA_FROM_DEVICE;
		dir_slave = DMA_DEV_TO_MEM;
	} else {
		dir_data = DMA_TO_DEVICE;
		dir_slave = DMA_MEM_TO_DEV;
	}

	/* The block doesn't manage the FIFO DREQs properly for
	 * multi-block transfers, so don't attempt to DMA the final
	 * few words. Unfortunately this requires the final sg entry
	 * to be trimmed. N.B. This code demands that the overspill
	 * is contained in a single sg entry.
	 */

	host->drain_words = 0;
	if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
		struct scatterlist *sg;
		u32 len;
		int i;

		len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
			  (u32)data->blocks * data->blksz);

		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg_is_last(sg)) {
				WARN_ON(sg->length < len);
				sg->length -= len;
				host->drain_page = sg_page(sg);
				host->drain_offset = sg->offset + sg->length;
			}
		}
		host->drain_words = len / 4;
	}

	/* The parameters have already been validated, so this will not fail */
	(void)dmaengine_slave_config(dma_chan,
				     (dir_data == DMA_FROM_DEVICE) ?
				     &host->dma_cfg_rx :
				     &host->dma_cfg_tx);

	len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
			 dir_data);

	if (len > 0)
		desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
					       len, dir_slave,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);

	if (desc) {
		desc->callback = bcm2835_dma_complete;
		desc->callback_param = host;
		host->dma_desc = desc;
		host->dma_chan = dma_chan;
		host->dma_dir = dir_data;
	}
}
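
/*
 * Worked example of the drain logic above (illustrative numbers): with
 * FIFO_READ_THRESHOLD = 4, len = min(3 * 4, total transfer size), so at
 * most 12 bytes are trimmed from the last sg entry and host->drain_words
 * is at most 3. Those words are later read out by PIO in
 * bcm2835_dma_complete_work() instead of being transferred by DMA.
 */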

static void bcm2835_start_dma(struct bcm2835_host *host)
{
	dmaengine_submit(host->dma_desc);
	dma_async_issue_pending(host->dma_chan);
}

static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
{
	u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
		       SDHCFG_BUSY_IRPT_EN;

	if (host->dma_desc) {
		host->hcfg = (host->hcfg & ~all_irqs) |
			     SDHCFG_BUSY_IRPT_EN;
	} else {
		host->hcfg = (host->hcfg & ~all_irqs) |
			     SDHCFG_DATA_IRPT_EN |
			     SDHCFG_BUSY_IRPT_EN;
	}

	writel(host->hcfg, host->ioaddr + SDHCFG);
}

static void bcm2835_prepare_data(struct bcm2835_host *host,
				 struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	host->data = data;
	if (!data)
		return;

	host->data_complete = false;
	host->data->bytes_xfered = 0;

	if (!host->dma_desc) {
		/* Use PIO */
		int flags = SG_MITER_ATOMIC;

		if (data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	bcm2835_set_transfer_irqs(host);

	writel(data->blksz, host->ioaddr + SDHBCT);
	writel(data->blocks, host->ioaddr + SDHBLC);
}

static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
{
	struct device *dev = &host->pdev->dev;
	int ret;
	u32 value;

	ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
				 !(value & SDCMD_NEW_FLAG), 1, 10);
	if (ret == -ETIMEDOUT)
		/* if it takes a while make poll interval bigger */
		ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
					 !(value & SDCMD_NEW_FLAG),
					 10, max_ms * 1000);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);

	return value;
}
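
/*
 * The two readl_poll_timeout() calls above implement a two-stage poll:
 * first a tight poll (1 us interval for up to 10 us) for the common case
 * where SDCMD_NEW_FLAG clears almost immediately, then a coarser poll up
 * to the caller-supplied max_ms budget. The second-stage interval and
 * budget shown here are reconstructed from context and should be checked
 * against the original source.
 */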

static void bcm2835_finish_request(struct bcm2835_host *host)
{
	struct dma_chan *terminate_chan = NULL;
	struct mmc_request *mrq;

	cancel_delayed_work(&host->timeout_work);

	mrq = host->mrq;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	host->dma_desc = NULL;
	terminate_chan = host->dma_chan;
	host->dma_chan = NULL;

	if (terminate_chan) {
		int err = dmaengine_terminate_all(terminate_chan);

		if (err)
			dev_err(&host->pdev->dev,
				"failed to terminate DMA (%d)\n", err);
	}

	mmc_request_done(host->mmc, mrq);
}

static bool bcm2835_send_command(struct bcm2835_host *host,
				 struct mmc_command *cmd)
{
	struct device *dev = &host->pdev->dev;
	u32 sdcmd, sdhsts;
	unsigned long timeout;

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "previous command never completed.\n");
		bcm2835_dumpregs(host);
		cmd->error = -EILSEQ;
		bcm2835_finish_request(host);
		return false;
	}

	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout = 10 * HZ;
	schedule_delayed_work(&host->timeout_work, timeout);

	host->cmd = cmd;

	/* Clear any error flags */
	sdhsts = readl(host->ioaddr + SDHSTS);
	if (sdhsts & SDHSTS_ERROR_MASK)
		writel(sdhsts, host->ioaddr + SDHSTS);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		dev_err(dev, "unsupported response type!\n");
		cmd->error = -EINVAL;
		bcm2835_finish_request(host);
		return false;
	}

	bcm2835_prepare_data(host, cmd);

	writel(cmd->arg, host->ioaddr + SDARG);

	sdcmd = cmd->opcode & SDCMD_CMD_MASK;

	host->use_busy = false;
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		sdcmd |= SDCMD_NO_RESPONSE;
	} else {
		if (cmd->flags & MMC_RSP_136)
			sdcmd |= SDCMD_LONG_RESPONSE;
		if (cmd->flags & MMC_RSP_BUSY) {
			sdcmd |= SDCMD_BUSYWAIT;
			host->use_busy = true;
		}
	}

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_WRITE)
			sdcmd |= SDCMD_WRITE_CMD;
		if (cmd->data->flags & MMC_DATA_READ)
			sdcmd |= SDCMD_READ_CMD;
	}

	writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);

	return true;
}
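
/*
 * Timeout note for bcm2835_send_command(): for commands that use busy
 * signalling with a long cmd->busy_timeout (over 9 seconds), the software
 * timeout work is stretched to cover it; otherwise a fixed default is
 * used. The default shown above (10 * HZ) is reconstructed from context
 * rather than copied from the original source.
 */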

static void bcm2835_transfer_complete(struct bcm2835_host *host)
{
	struct mmc_data *data;

	WARN_ON(!host->data_complete);

	data = host->data;
	host->data = NULL;

	/* Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (host->mrq->stop && (data->error || !host->use_sbc)) {
		if (bcm2835_send_command(host, host->mrq->stop)) {
			/* No busy, so poll for completion */
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else {
		bcm2835_wait_transfer_complete(host);
		bcm2835_finish_request(host);
	}
}

static void bcm2835_finish_data(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_data *data;

	data = host->data;

	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);

	data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);

	host->data_complete = true;

	if (host->cmd) {
		/* Data managed to finish before the
		 * command completed. Make sure we do
		 * things in the proper order.
		 */
		dev_dbg(dev, "Finished early - HSTS %08x\n",
			readl(host->ioaddr + SDHSTS));
	} else {
		bcm2835_transfer_complete(host);
	}
}

static void bcm2835_finish_command(struct bcm2835_host *host)
{
	struct device *dev = &host->pdev->dev;
	struct mmc_command *cmd = host->cmd;
	u32 sdcmd;

	sdcmd = bcm2835_read_wait_sdcmd(host, 100);

	/* Check for errors */
	if (sdcmd & SDCMD_NEW_FLAG) {
		dev_err(dev, "command never completed.\n");
		bcm2835_dumpregs(host);
		host->cmd->error = -EIO;
		bcm2835_finish_request(host);
		return;
	} else if (sdcmd & SDCMD_FAIL_FLAG) {
		u32 sdhsts = readl(host->ioaddr + SDHSTS);

		/* Clear the errors */
		writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);

		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
			u32 edm, fsm;

			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
				host->cmd->error = -ETIMEDOUT;
			} else {
				dev_err(dev, "unexpected command %d error\n",
					host->cmd->opcode);
				bcm2835_dumpregs(host);
				host->cmd->error = -EILSEQ;
			}
			edm = readl(host->ioaddr + SDEDM);
			fsm = edm & SDEDM_FSM_MASK;
			if (fsm == SDEDM_FSM_READWAIT ||
			    fsm == SDEDM_FSM_WRITESTART1)
				/* Kick the FSM out of its wait */
				writel(edm | SDEDM_FORCE_DATA_MODE,
				       host->ioaddr + SDEDM);
			bcm2835_finish_request(host);
			return;
		}
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			int i;

			for (i = 0; i < 4; i++) {
				cmd->resp[3 - i] =
					readl(host->ioaddr + SDRSP0 + i * 4);
			}
		} else {
			cmd->resp[0] = readl(host->ioaddr + SDRSP0);
		}
	}

	if (cmd == host->mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		host->cmd = NULL;
		if (bcm2835_send_command(host, host->mrq->cmd)) {
			if (host->data && host->dma_desc)
				/* DMA transfer starts now, PIO starts
				 * after irq
				 */
				bcm2835_start_dma(host);

			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (cmd == host->mrq->stop) {
		/* Finished CMD12 */
		bcm2835_finish_request(host);
	} else {
		/* Processed actual command. */
		host->cmd = NULL;
		if (!host->data)
			bcm2835_finish_request(host);
		else if (host->data_complete)
			bcm2835_transfer_complete(host);
	}
}

static void bcm2835_timeout(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct bcm2835_host *host =
		container_of(d, struct bcm2835_host, timeout_work);
	struct device *dev = &host->pdev->dev;

	mutex_lock(&host->mutex);

	if (host->mrq) {
		dev_err(dev, "timeout waiting for hardware interrupt.\n");
		bcm2835_dumpregs(host);

		bcm2835_reset(host->mmc);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			bcm2835_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			bcm2835_finish_request(host);
		}
	}

	mutex_unlock(&host->mutex);
}

static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
{
	struct device *dev = &host->pdev->dev;

	if (!(intmask & SDHSTS_ERROR_MASK))
		return false;

	if (!host->cmd)
		return true;

	dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
	if (intmask & SDHSTS_CRC7_ERROR) {
		host->cmd->error = -EILSEQ;
	} else if (intmask & (SDHSTS_CRC16_ERROR |
			      SDHSTS_FIFO_ERROR)) {
		if (host->mrq->data)
			host->mrq->data->error = -EILSEQ;
		else
			host->cmd->error = -EILSEQ;
	} else if (intmask & SDHSTS_REW_TIME_OUT) {
		if (host->mrq->data)
			host->mrq->data->error = -ETIMEDOUT;
		else
			host->cmd->error = -ETIMEDOUT;
	} else if (intmask & SDHSTS_CMD_TIME_OUT) {
		host->cmd->error = -ETIMEDOUT;
	}
	bcm2835_dumpregs(host);
	return true;
}

static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
{
	if (!host->data)
		return;
	if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
		host->data->error = -EILSEQ;
	if (intmask & SDHSTS_REW_TIME_OUT)
		host->data->error = -ETIMEDOUT;
}

static void bcm2835_busy_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->cmd)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (WARN_ON(!host->use_busy)) {
		bcm2835_dumpregs(host);
		return;
	}
	host->use_busy = false;

	bcm2835_finish_command(host);
}

static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
{
	/* There are no dedicated data/space available interrupt
	 * status bits, so it is necessary to use the single shared
	 * data/space available FIFO status bits. It is therefore not
	 * an error to get here when there is no data transfer in
	 * progress.
	 */
	if (!host->data)
		return;

	bcm2835_check_data_error(host, intmask);
	if (host->data->error)
		goto finished;

	if (host->data->flags & MMC_DATA_WRITE) {
		/* Use the block interrupt for writes after the first block */
		host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
		host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
		writel(host->hcfg, host->ioaddr + SDHCFG);
		bcm2835_transfer_pio(host);
	} else {
		bcm2835_transfer_pio(host);
		host->blocks--;
		if ((host->blocks == 0) || host->data->error)
			goto finished;
	}
	return;

finished:
	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
	writel(host->hcfg, host->ioaddr + SDHCFG);
}

static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
{
	if (!host->data)
		return;
	if ((host->blocks == 0) || host->data->error)
		bcm2835_finish_data(host);
}

static void bcm2835_block_irq(struct bcm2835_host *host)
{
	if (WARN_ON(!host->data)) {
		bcm2835_dumpregs(host);
		return;
	}

	if (!host->dma_desc) {
		WARN_ON(!host->blocks);
		if (host->data->error || (--host->blocks == 0))
			bcm2835_finish_data(host);
		else
			bcm2835_transfer_pio(host);
	} else if (host->data->flags & MMC_DATA_WRITE) {
		bcm2835_finish_data(host);
	}
}

static irqreturn_t bcm2835_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct bcm2835_host *host = dev_id;
	u32 intmask;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHSTS);

	writel(SDHSTS_BUSY_IRPT |
	       SDHSTS_BLOCK_IRPT |
	       SDHSTS_SDIO_IRPT |
	       SDHSTS_DATA_FLAG,
	       host->ioaddr + SDHSTS);

	if (intmask & SDHSTS_BLOCK_IRPT) {
		bcm2835_check_data_error(host, intmask);
		host->irq_block = true;
		result = IRQ_WAKE_THREAD;
	}

	if (intmask & SDHSTS_BUSY_IRPT) {
		if (!bcm2835_check_cmd_error(host, intmask)) {
			host->irq_busy = true;
			result = IRQ_WAKE_THREAD;
		} else {
			result = IRQ_HANDLED;
		}
	}

	/* There is no true data interrupt status bit, so it is
	 * necessary to qualify the data flag with the interrupt
	 * enable bit.
	 */
	if ((intmask & SDHSTS_DATA_FLAG) &&
	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
		bcm2835_data_irq(host, intmask);
		host->irq_data = true;
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&host->lock);

	return result;
}
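
/*
 * Interrupt handling note: the hard handler above runs under host->lock,
 * acknowledges the status bits and only records which events occurred
 * (irq_block / irq_busy / irq_data), returning IRQ_WAKE_THREAD so the
 * threaded handler below can do the slower completion work under
 * host->mutex, where sleeping (e.g. issuing the stop command) is allowed.
 */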

static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
{
	struct bcm2835_host *host = dev_id;
	unsigned long flags;
	bool block, busy, data;

	spin_lock_irqsave(&host->lock, flags);

	block = host->irq_block;
	busy = host->irq_busy;
	data = host->irq_data;
	host->irq_block = false;
	host->irq_busy = false;
	host->irq_data = false;

	spin_unlock_irqrestore(&host->lock, flags);

	mutex_lock(&host->mutex);

	if (block)
		bcm2835_block_irq(host);
	if (busy)
		bcm2835_busy_irq(host);
	if (data)
		bcm2835_data_threaded_irq(host);

	mutex_unlock(&host->mutex);

	return IRQ_HANDLED;
}

static void bcm2835_dma_complete_work(struct work_struct *work)
{
	struct bcm2835_host *host =
		container_of(work, struct bcm2835_host, dma_work);
	struct mmc_data *data = host->data;

	mutex_lock(&host->mutex);

	if (host->dma_chan) {
		dma_unmap_sg(host->dma_chan->device->dev,
			     data->sg, data->sg_len,
			     host->dma_dir);

		host->dma_chan = NULL;
	}

	if (host->drain_words) {
		unsigned long flags;
		void *page;
		u32 *buf;

		if (host->drain_offset & PAGE_MASK) {
			host->drain_page += host->drain_offset >> PAGE_SHIFT;
			host->drain_offset &= ~PAGE_MASK;
		}
		local_irq_save(flags);
		page = kmap_atomic(host->drain_page);
		buf = page + host->drain_offset;

		while (host->drain_words) {
			u32 edm = readl(host->ioaddr + SDEDM);

			if ((edm >> 4) & 0x1f)
				*(buf++) = readl(host->ioaddr + SDDATA);
			host->drain_words--;
		}

		kunmap_atomic(page);
		local_irq_restore(flags);
	}

	bcm2835_finish_data(host);

	mutex_unlock(&host->mutex);
}

static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
{
	int div;

	/* The SDCDIV register has 11 bits, and holds (div - 2). But
	 * in data mode the max is 50MHz without a minimum, and only
	 * the bottom 3 bits are used. Since the switch over is
	 * automatic (unless we have marked the card as slow...),
	 * chosen values have to make sense in both modes. Ident mode
	 * must be 100-400KHz, so can range check the requested
	 * clock. CMD15 must be used to return to data mode, so this
	 * can't be a problem.
	 *
	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
	 *		   4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
	 *
	 *		 623->400KHz/27.8MHz
	 *		 reset value (507)->491159/50MHz
	 *
	 * BUT, the 3-bit clock divisor in data mode is too small if
	 * the core clock is higher than 250MHz, so instead use the
	 * SLOW_CARD configuration bit to force the use of the ident
	 * clock divisor at all times.
	 */

	if (clock < 100000) {
		/* Can't stop the clock, but make it as slow as possible
		 * to show willing
		 */
		host->cdiv = SDCDIV_MAX_CDIV;
		writel(host->cdiv, host->ioaddr + SDCDIV);
		return;
	}

	div = host->max_clk / clock;
	if (div < 2)
		div = 2;
	if ((host->max_clk / div) > clock)
		div++;
	div -= 2;

	if (div > SDCDIV_MAX_CDIV)
		div = SDCDIV_MAX_CDIV;

	clock = host->max_clk / (div + 2);
	host->mmc->actual_clock = clock;

	/* Calibrate some delays */

	host->ns_per_fifo_word = (1000000000 / clock) *
		((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);

	host->cdiv = div;
	writel(host->cdiv, host->ioaddr + SDCDIV);

	/* Set the timeout to 500ms */
	writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT);
}

static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct bcm2835_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	u32 edm, fsm;

	/* Reset the error statuses in case this is a retry */
	if (mrq->sbc)
		mrq->sbc->error = 0;
	if (mrq->cmd)
		mrq->cmd->error = 0;
	if (mrq->data)
		mrq->data->error = 0;
	if (mrq->stop)
		mrq->stop->error = 0;

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(dev, "unsupported block size (%d bytes)\n",
			mrq->data->blksz);

		if (mrq->cmd)
			mrq->cmd->error = -EINVAL;

		mmc_request_done(mmc, mrq);
		return;
	}

	if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
		bcm2835_prepare_dma(host, mrq->data);

	mutex_lock(&host->mutex);

	host->mrq = mrq;

	edm = readl(host->ioaddr + SDEDM);
	fsm = edm & SDEDM_FSM_MASK;

	if ((fsm != SDEDM_FSM_IDENTMODE) &&
	    (fsm != SDEDM_FSM_DATAMODE)) {
		dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
			edm);
		bcm2835_dumpregs(host);

		if (mrq->cmd)
			mrq->cmd->error = -EILSEQ;

		bcm2835_finish_request(host);
		mutex_unlock(&host->mutex);

		return;
	}

	host->use_sbc = !!mrq->sbc && host->mrq->data &&
			(host->mrq->data->flags & MMC_DATA_READ);
	if (host->use_sbc) {
		if (bcm2835_send_command(host, mrq->sbc)) {
			if (!host->use_busy)
				bcm2835_finish_command(host);
		}
	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
		if (host->data && host->dma_desc) {
			/* DMA transfer starts now, PIO starts after irq */
			bcm2835_start_dma(host);
		}

		if (!host->use_busy)
			bcm2835_finish_command(host);
	}

	mutex_unlock(&host->mutex);
}

static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct bcm2835_host *host = mmc_priv(mmc);

	mutex_lock(&host->mutex);

	if (!ios->clock || ios->clock != host->clock) {
		bcm2835_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	/* set bus width */
	host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->hcfg |= SDHCFG_WIDE_EXT_BUS;

	host->hcfg |= SDHCFG_WIDE_INT_BUS;

	/* Disable clever clock switching, to cope with fast core clocks */
	host->hcfg |= SDHCFG_SLOW_CARD;

	writel(host->hcfg, host->ioaddr + SDHCFG);

	mutex_unlock(&host->mutex);
}

static const struct mmc_host_ops bcm2835_ops = {
	.request = bcm2835_request,
	.set_ios = bcm2835_set_ios,
	.hw_reset = bcm2835_reset,
};

static int bcm2835_add_host(struct bcm2835_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = &host->pdev->dev;
	char pio_limit_string[20];
	int ret;

	if (!mmc->f_max || mmc->f_max > host->max_clk)
		mmc->f_max = host->max_clk;
	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;

	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);

	dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
		mmc->f_max, mmc->f_min, mmc->max_busy_timeout);

	/* host controller capabilities */
	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		     MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_ERASE |
		     MMC_CAP_CMD23;

	spin_lock_init(&host->lock);
	mutex_init(&host->mutex);

	if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
		dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
		host->use_dma = false;
	} else {
		host->use_dma = true;

		host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_tx.slave_id = 13;		/* DREQ channel */
		host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
		host->dma_cfg_tx.src_addr = 0;
		host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;

		host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		host->dma_cfg_rx.slave_id = 13;		/* DREQ channel */
		host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
		host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
		host->dma_cfg_rx.dst_addr = 0;

		if (dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_tx) != 0 ||
		    dmaengine_slave_config(host->dma_chan_rxtx,
					   &host->dma_cfg_rx) != 0)
			host->use_dma = false;
	}

	mmc->max_segs = 128;
	mmc->max_req_size = 524288;
	mmc->max_seg_size = mmc->max_req_size;
	mmc->max_blk_size = 1024;
	mmc->max_blk_count = 65535;

	/* report supported voltage ranges */
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
	INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);

	/* Set interrupt enables */
	host->hcfg = SDHCFG_BUSY_IRPT_EN;

	bcm2835_reset_internal(host);

	ret = request_threaded_irq(host->irq, bcm2835_irq,
				   bcm2835_threaded_irq,
				   0, mmc_hostname(mmc), host);
	if (ret) {
		dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
		return ret;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		free_irq(host->irq, host);
		return ret;
	}

	pio_limit_string[0] = '\0';
	if (host->use_dma && (PIO_THRESHOLD > 0))
		sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
	dev_info(dev, "loaded - DMA %s%s\n",
		 host->use_dma ? "enabled" : "disabled", pio_limit_string);

	return 0;
}
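
/*
 * Worked example for the limits set above (illustrative): with a 250 MHz
 * core clock, f_min = 250000000 / 0x7ff is roughly 122 kHz, and with
 * f_max = 250 MHz the max_busy_timeout of ~0 / (f_max / 1000) is about
 * 17 seconds (0xffffffff / 250000 milliseconds).
 */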

static int bcm2835_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct resource *iomem;
	struct bcm2835_host *host;
	struct mmc_host *mmc;
	const __be32 *regaddr_p;
	int ret;

	dev_dbg(dev, "%s\n", __func__);
	mmc = mmc_alloc_host(sizeof(*host), dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &bcm2835_ops;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err;
	}

	/* Parse OF address directly to get the physical address for
	 * DMA to our registers.
	 */
	regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
	if (!regaddr_p) {
		dev_err(dev, "Can't get phys address\n");
		ret = -EINVAL;
		goto err;
	}

	host->phys_addr = be32_to_cpup(regaddr_p);

	host->dma_chan = NULL;
	host->dma_desc = NULL;

	host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "could not get clk: %d\n", ret);
		goto err;
	}

	host->max_clk = clk_get_rate(clk);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(dev, "get IRQ failed\n");
		ret = -EINVAL;
		goto err;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err;

	ret = bcm2835_add_host(host);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, host);

	dev_dbg(dev, "%s -> OK\n", __func__);

	return 0;

err:
	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
	if (host->dma_chan_rxtx)
		dma_release_channel(host->dma_chan_rxtx);
	mmc_free_host(mmc);

	return ret;
}

static int bcm2835_remove(struct platform_device *pdev)
{
	struct bcm2835_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);

	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);

	free_irq(host->irq, host);

	cancel_work_sync(&host->dma_work);
	cancel_delayed_work_sync(&host->timeout_work);

	mmc_free_host(host->mmc);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id bcm2835_match[] = {
	{ .compatible = "brcm,bcm2835-sdhost" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm2835_match);

static struct platform_driver bcm2835_driver = {
	.probe      = bcm2835_probe,
	.remove     = bcm2835_remove,
	.driver     = {
		.name		= "sdhost-bcm2835",
		.of_match_table	= bcm2835_match,
	},
};
module_platform_driver(bcm2835_driver);

MODULE_ALIAS("platform:sdhost-bcm2835");
MODULE_DESCRIPTION("BCM2835 SDHost driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Phil Elwell");