/*
 * Marvell MMC/SD/SDIO driver
 *
 * Authors: Maen Suleiman, Nicolas Pitre
 * Copyright (C) 2008-2009 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

#include <asm/sizes.h>
#include <asm/unaligned.h>

#include "mvsdio.h"
#define DRIVER_NAME	"mvsdio"

static int maxfreq;
static int nodma;

struct mvsd_host {
	void __iomem *base;
	struct mmc_request *mrq;
	spinlock_t lock;
	unsigned int xfer_mode;
	unsigned int intr_en;
	unsigned int ctrl;
	unsigned int pio_size;
	void *pio_ptr;
	unsigned int sg_frags;
	unsigned int ns_per_clk;
	unsigned int clock;
	unsigned int base_clock;
	struct timer_list timer;
	struct mmc_host *mmc;
	struct device *dev;
	struct clk *clk;
};

#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))
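/*
 * Added illustrative note (not part of the original driver): both accessor
 * macros above rely on a local variable named "iobase" being in scope at
 * the call site, which is why every function below starts by loading
 * host->base into such a variable.  The helper name here is invented
 * purely to show that pattern.
 */
static inline u32 mvsd_example_read_hw_state(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;	/* required by mvsd_read() */

	return mvsd_read(MVSD_HW_STATE);
}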
static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
{
	void __iomem *iobase = host->base;
	unsigned int tmout;
	int tmout_index;

	/*
	 * Hardware weirdness.  The FIFO_EMPTY bit of the HW_STATE
	 * register is sometimes not set before a while when some
	 * "unusual" data block sizes are used (such as with the SWITCH
	 * command), even despite the fact that the XFER_DONE interrupt
	 * was raised.  And if another data transfer starts before
	 * this bit comes to good sense (which eventually happens by
	 * itself) then the new transfer simply fails with a timeout.
	 */
	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		unsigned long t = jiffies + HZ;
		unsigned int hw_state, count = 0;
		do {
			hw_state = mvsd_read(MVSD_HW_STATE);
			if (time_after(jiffies, t)) {
				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
				break;
			}
			count++;
		} while (!(hw_state & (1 << 13)));
		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
				   hw_state, count, jiffies - (t - HZ));
	}

	/* If timeout=0 then maximum timeout index is used. */
	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
	tmout += data->timeout_clks;
	tmout_index = fls(tmout - 1) - 12;
	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;

	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
		(data->flags & MMC_DATA_READ) ? "read" : "write",
		(u32)sg_virt(data->sg), data->blocks, data->blksz,
		tmout, tmout_index);

	host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
	host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
	mvsd_write(MVSD_HOST_CTRL, host->ctrl);
	mvsd_write(MVSD_BLK_COUNT, data->blocks);
	mvsd_write(MVSD_BLK_SIZE, data->blksz);

	if (nodma || (data->blksz | data->sg->offset) & 3 ||
	    ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
		/*
		 * We cannot do DMA on a buffer which offset or size
		 * is not aligned on a 4-byte boundary.
		 *
		 * It also appears the host to card DMA can corrupt
		 * data when the buffer is not aligned on a 64 byte
		 * boundary.
		 */
		host->pio_size = data->blocks * data->blksz;
		host->pio_ptr = sg_virt(data->sg);
		dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
			host->pio_ptr, host->pio_size);
		return 1;
	} else {
		dma_addr_t phys_addr;
		int dma_dir = (data->flags & MMC_DATA_READ) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;
		host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
					    data->sg_len, dma_dir);
		phys_addr = sg_dma_address(data->sg);
		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
		mvsd_write(MVSD_SYS_ADDR_HI,  (u32)phys_addr >> 16);
		return 0;
	}
}
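/*
 * Illustrative sketch (not part of the original driver): how the timeout
 * computation in mvsd_setup_data() maps a requested timeout to the coarse
 * power-of-two index the controller uses.  The helper name and the reading
 * of the index as "roughly 2^(index + 12) clocks" are assumptions made for
 * illustration; the driver itself only relies on fls() picking the smallest
 * index whose range covers the requested number of clocks.
 */
static inline int mvsd_example_tmout_index(unsigned int timeout_ns,
					   unsigned int timeout_clks,
					   unsigned int ns_per_clk)
{
	unsigned int clks = DIV_ROUND_UP(timeout_ns, ns_per_clk) + timeout_clks;
	int index = fls(clks - 1) - 12;

	/* e.g. 100 ms at 50 MHz -> 5,000,000 clocks -> fls() = 23 -> index 11 */
	if (index < 0)
		index = 0;
	if (index > MVSD_HOST_CTRL_TMOUT_MAX)
		index = MVSD_HOST_CTRL_TMOUT_MAX;
	return index;
}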
static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	struct mmc_command *cmd = mrq->cmd;
	u32 cmdreg = 0, xfer = 0, intr = 0;
	unsigned long flags;

	BUG_ON(host->mrq != NULL);
	host->mrq = mrq;

	dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
		cmd->opcode, mvsd_read(MVSD_HW_STATE));

	cmdreg = MVSD_CMD_INDEX(cmd->opcode);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= MVSD_CMD_RSP_48BUSY;
	else if (cmd->flags & MMC_RSP_136)
		cmdreg |= MVSD_CMD_RSP_136;
	else if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= MVSD_CMD_RSP_48;
	else
		cmdreg |= MVSD_CMD_RSP_NONE;

	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= MVSD_CMD_CHECK_CMDCRC;

	if (cmd->flags & MMC_RSP_OPCODE)
		cmdreg |= MVSD_CMD_INDX_CHECK;

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmdreg |= MVSD_UNEXPECTED_RESP;
		intr |= MVSD_NOR_UNEXP_RSP;
	}

	if (mrq->data) {
		struct mmc_data *data = mrq->data;
		int pio;

		cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
		xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
		if (data->flags & MMC_DATA_READ)
			xfer |= MVSD_XFER_MODE_TO_HOST;

		pio = mvsd_setup_data(host, data);
		if (pio) {
			xfer |= MVSD_XFER_MODE_PIO;
			/* PIO section of mvsd_irq has comments on those bits */
			if (data->flags & MMC_DATA_WRITE)
				intr |= MVSD_NOR_TX_AVAIL;
			else if (host->pio_size > 32)
				intr |= MVSD_NOR_RX_FIFO_8W;
			else
				intr |= MVSD_NOR_RX_READY;
		}

		if (data->stop) {
			struct mmc_command *stop = data->stop;
			u32 cmd12reg = 0;

			mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
			mvsd_write(MVSD_AUTOCMD12_ARG_HI,  stop->arg >> 16);
			if (stop->flags & MMC_RSP_BUSY)
				cmd12reg |= MVSD_AUTOCMD12_BUSY;
			if (stop->flags & MMC_RSP_OPCODE)
				cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
			cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
			mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);

			xfer |= MVSD_XFER_MODE_AUTO_CMD12;
			intr |= MVSD_NOR_AUTOCMD12_DONE;
		} else {
			intr |= MVSD_NOR_XFER_DONE;
		}
	} else {
		intr |= MVSD_NOR_CMD_DONE;
	}

	mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
	mvsd_write(MVSD_ARG_HI,  cmd->arg >> 16);

	spin_lock_irqsave(&host->lock, flags);

	host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
	host->xfer_mode |= xfer;
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

	mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_CMD, cmdreg);

	host->intr_en &= MVSD_NOR_CARD_INT;
	host->intr_en |= intr | MVSD_NOR_ERROR;
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	mvsd_write(MVSD_ERR_INTR_EN, 0xffff);

	mod_timer(&host->timer, jiffies + 5 * HZ);

	spin_unlock_irqrestore(&host->lock, flags);
}
static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
			   u32 err_status)
{
	void __iomem *iobase = host->base;

	if (cmd->flags & MMC_RSP_136) {
		unsigned int response[8], i;
		for (i = 0; i < 8; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[0] & 0x03ff) << 22) |
			       ((response[1] & 0xffff) << 6) |
			       ((response[2] & 0xfc00) >> 10);
		cmd->resp[1] = ((response[2] & 0x03ff) << 22) |
			       ((response[3] & 0xffff) << 6) |
			       ((response[4] & 0xfc00) >> 10);
		cmd->resp[2] = ((response[4] & 0x03ff) << 22) |
			       ((response[5] & 0xffff) << 6) |
			       ((response[6] & 0xfc00) >> 10);
		cmd->resp[3] = ((response[6] & 0x03ff) << 22) |
			       ((response[7] & 0x3fff) << 8);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
			       ((response[1] & 0xffff) << (14 - 8)) |
			       ((response[0] & 0x03ff) << (30 - 8));
		cmd->resp[1] = ((response[0] & 0xfc00) >> 10);
	}

	if (err_status & MVSD_ERR_CMD_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
				 MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
		cmd->error = -EILSEQ;
	}
	err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
			MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
			MVSD_ERR_CMD_STARTBIT);

	return err_status;
}
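/*
 * Illustrative sketch (not part of the original driver): the controller
 * exposes command responses as 16-bit register words, and the word
 * boundaries do not line up with the 32-bit resp[] layout expected by the
 * MMC core.  This helper redoes the short-response reassembly performed in
 * mvsd_finish_cmd() on a plain array so the shift counts can be checked in
 * isolation; the function name is invented for illustration.
 */
static inline u32 mvsd_example_unpack_short_resp(const u16 w[3])
{
	/* 6 low bits of w[2], all 16 bits of w[1], 10 low bits of w[0] */
	return ((w[2] & 0x003f) << 0) |
	       ((w[1] & 0xffff) << 6) |
	       ((w[0] & 0x03ff) << 22);
}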
static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
			    u32 err_status)
{
	void __iomem *iobase = host->base;

	if (host->pio_ptr) {
		host->pio_ptr = NULL;
		host->pio_size = 0;
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
			     (data->flags & MMC_DATA_READ) ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

	if (err_status & MVSD_ERR_DATA_TIMEOUT)
		data->error = -ETIMEDOUT;
	else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
		data->error = -EILSEQ;
	else if (err_status & MVSD_ERR_XFER_SIZE)
		data->error = -EBADE;
	err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
			MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);

	dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
		mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
	data->bytes_xfered =
		(data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
	/* We can't be sure about the last block when errors are detected */
	if (data->bytes_xfered && data->error)
		data->bytes_xfered -= data->blksz;

	/* Handle Auto cmd 12 response */
	if (data->stop) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_AUTO_RSP(i));
		data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
				      ((response[1] & 0xffff) << (14 - 8)) |
				      ((response[0] & 0x03ff) << (30 - 8));
		data->stop->resp[1] = ((response[0] & 0xfc00) >> 10);
		data->stop->resp[2] = 0;
		data->stop->resp[3] = 0;

		if (err_status & MVSD_ERR_AUTOCMD12) {
			u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
			dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
			if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
				data->stop->error = -ENOEXEC;
			else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
				data->stop->error = -ETIMEDOUT;
			else if (err_cmd12)
				data->stop->error = -EILSEQ;
			err_status &= ~MVSD_ERR_AUTOCMD12;
		}
	}

	return err_status;
}
static irqreturn_t mvsd_irq(int irq, void *dev)
{
	struct mvsd_host *host = dev;
	void __iomem *iobase = host->base;
	u32 intr_status, intr_done_mask;
	int irq_handled = 0;

	intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
	dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
		intr_status, mvsd_read(MVSD_NOR_INTR_EN),
		mvsd_read(MVSD_HW_STATE));

	/*
	 * It looks like, SDIO IP can issue one late, spurious irq
	 * although all irqs should be disabled. To work around this,
	 * bail out early, if we didn't expect any irqs to occur.
	 */
	if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
		dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN),
			mvsd_read(MVSD_ERR_INTR_STATUS),
			mvsd_read(MVSD_ERR_INTR_EN));
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	/* PIO handling, if needed. Messy business... */
	if (host->pio_size &&
	    (intr_status & host->intr_en &
	     (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
			readsw(iobase + MVSD_FIFO, p, 16);
			p += 16;
			s -= 32;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		/*
		 * Normally we'd use < 32 here, but the RX_FIFO_8W bit
		 * doesn't appear to assert when there is exactly 32 bytes
		 * (8 words) left to fetch in a transfer.
		 */
		while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
			put_unaligned(mvsd_read(MVSD_FIFO), p++);
			put_unaligned(mvsd_read(MVSD_FIFO), p++);
			s -= 4;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
			u16 val[4] = {0};
			val[0] = mvsd_read(MVSD_FIFO);
			val[1] = mvsd_read(MVSD_FIFO);
			memcpy(p, ((void *)&val) + 4 - s, s);
			s = 0;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s == 0) {
			host->intr_en &=
			     ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
			mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		} else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
			host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
			host->intr_en |= MVSD_NOR_RX_READY;
			mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	} else if (host->pio_size &&
		   (intr_status & host->intr_en &
		    (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		/*
		 * The TX_FIFO_8W bit is unreliable. When set, bursting
		 * 16 halfwords all at once in the FIFO drops data. Actually
		 * TX_AVAIL does go off after only one word is pushed even if
		 * TX_FIFO_8W remains set.
		 */
		while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			s -= 4;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
			u16 val[4] = {0};
			memcpy(((void *)&val) + 4 - s, p, s);
			mvsd_write(MVSD_FIFO, val[0]);
			mvsd_write(MVSD_FIFO, val[1]);
			s = 0;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s == 0) {
			host->intr_en &=
			     ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
			mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	}

	mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);

	intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
			 MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
	if (intr_status & host->intr_en & ~intr_done_mask) {
		struct mmc_request *mrq = host->mrq;
		struct mmc_command *cmd = mrq->cmd;
		u32 err_status = 0;

		del_timer(&host->timer);
		host->mrq = NULL;

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);

		spin_unlock(&host->lock);

		if (intr_status & MVSD_NOR_UNEXP_RSP) {
			cmd->error = -EPROTO;
		} else if (intr_status & MVSD_NOR_ERROR) {
			err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
			dev_dbg(host->dev, "err 0x%04x\n", err_status);
		}

		err_status = mvsd_finish_cmd(host, cmd, err_status);
		if (mrq->data)
			err_status = mvsd_finish_data(host, mrq->data, err_status);
		if (err_status) {
			dev_err(host->dev, "unhandled error status %#04x\n",
				err_status);
			cmd->error = -ENOMSG;
		}

		mmc_request_done(host->mmc, mrq);
		irq_handled = 1;
	} else
		spin_unlock(&host->lock);

	if (intr_status & MVSD_NOR_CARD_INT) {
		mmc_signal_sdio_irq(host->mmc);
		irq_handled = 1;
	}

	if (irq_handled)
		return IRQ_HANDLED;

	dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
		intr_status, host->intr_en, host->pio_size);
	return IRQ_NONE;
}
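/*
 * Illustrative sketch (not part of the original driver): how the PIO tail
 * case in mvsd_irq() copes with a transfer size that is not a multiple of
 * 4.  The FIFO always yields two 16-bit words per RX_READY event, so the
 * data is staged in a small bounce buffer and only the wanted bytes are
 * copied out, mirroring the read path above.  The helper name and its
 * parameters are invented for illustration.
 */
static inline void mvsd_example_read_tail(void __iomem *iobase, void *dst, int bytes)
{
	u16 val[4] = {0};

	val[0] = mvsd_read(MVSD_FIFO);	/* first halfword */
	val[1] = mvsd_read(MVSD_FIFO);	/* second halfword, may be padding */
	memcpy(dst, ((void *)&val) + 4 - bytes, bytes);
}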
static void mvsd_timeout_timer(unsigned long data)
{
	struct mvsd_host *host = (struct mvsd_host *)data;
	void __iomem *iobase = host->base;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	if (mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
		dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
			mvsd_read(MVSD_HW_STATE),
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN));

		host->mrq = NULL;

		mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);

		host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
		mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);
		mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);

		mrq->cmd->error = -ETIMEDOUT;
		mvsd_finish_cmd(host, mrq->cmd, 0);
		if (mrq->data) {
			mrq->data->error = -ETIMEDOUT;
			mvsd_finish_data(host, mrq->data, 0);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable) {
		host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en |= MVSD_NOR_CARD_INT;
	} else {
		host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en &= ~MVSD_NOR_CARD_INT;
	}
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	spin_unlock_irqrestore(&host->lock, flags);
}
static void mvsd_power_up(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power up\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, 0);
	mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
static void mvsd_power_down(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power down\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
	mvsd_write(MVSD_NOR_STATUS_EN, 0);
	mvsd_write(MVSD_ERR_STATUS_EN, 0);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	u32 ctrl_reg = 0;

	if (ios->power_mode == MMC_POWER_UP)
		mvsd_power_up(host);

	if (ios->clock == 0) {
		mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
		mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
		host->clock = 0;
		dev_dbg(host->dev, "clock off\n");
	} else if (ios->clock != host->clock) {
		u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
		if (m > MVSD_BASE_DIV_MAX)
			m = MVSD_BASE_DIV_MAX;
		mvsd_write(MVSD_CLK_DIV, m);
		host->clock = ios->clock;
		host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
		dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
			ios->clock, host->base_clock / (m+1), m);
	}

	/* default transfer mode */
	ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
	ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;

	/* default to maximum timeout */
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;

	if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
		ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;

	/*
	 * The HI_SPEED_EN bit is causing trouble with many (but not all)
	 * high speed SD, SDHC and SDIO cards. Not enabling that bit
	 * makes all cards work. So let's just ignore that bit for now
	 * and revisit this issue if problems for not enabling this bit
	 * are ever reported.
	 */
#if 0
	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
#endif

	host->ctrl = ctrl_reg;
	mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
	dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
		(ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
			"push-pull" : "open-drain",
		(ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
			"4bit-width" : "1bit-width",
		(ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
			"high-speed" : "");

	if (ios->power_mode == MMC_POWER_OFF)
		mvsd_power_down(host);
}
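/*
 * Illustrative sketch (not part of the original driver): the divisor
 * programmed into MVSD_CLK_DIV in mvsd_set_ios() gives a card clock of
 * base_clock / (m + 1), with m rounded up so the result never exceeds the
 * requested rate.  For example, a 100 MHz base clock and a 25 MHz request
 * give m = 3 (exactly 25 MHz), while a 40 MHz request gives m = 2 and
 * about 33.3 MHz.  The helper name is invented.
 */
static inline unsigned int mvsd_example_card_clock(unsigned int base_clock,
						   unsigned int requested)
{
	u32 m = DIV_ROUND_UP(base_clock, requested) - 1;

	if (m > MVSD_BASE_DIV_MAX)
		m = MVSD_BASE_DIV_MAX;
	return base_clock / (m + 1);
}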
static const struct mmc_host_ops mvsd_ops = {
	.request		= mvsd_request,
	.get_ro			= mmc_gpio_get_ro,
	.set_ios		= mvsd_set_ios,
	.enable_sdio_irq	= mvsd_enable_sdio_irq,
};
static void
mv_conf_mbus_windows(struct mvsd_host *host,
		     const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = host->base;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + MVSD_WINDOW_CTRL(i));
		writel(0, iobase + MVSD_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + MVSD_WINDOW_CTRL(i));
		writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
	}
}
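/*
 * Illustrative sketch (not part of the original driver): the register
 * layout assumed by the window programming above.  Each MVSD_WINDOW_CTRL
 * register packs the window size (top 16 bits, stored as size - 1), the
 * mbus attribute, the DRAM target id and an enable bit.  The helper name
 * is invented for illustration.
 */
static inline u32 mvsd_example_window_ctrl(u32 size, u8 mbus_attr, u8 target_id)
{
	return ((size - 1) & 0xffff0000) | (mbus_attr << 8) | (target_id << 4) | 1;
}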
static int mvsd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mmc_host *mmc = NULL;
	struct mvsd_host *host = NULL;
	const struct mbus_dram_target_info *dram;
	struct resource *r;
	int ret, irq;

	if (!np) {
		dev_err(&pdev->dev, "no DT node\n");
		return -ENODEV;
	}
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;

	/*
	 * Some non-DT platforms do not pass a clock, and the clock
	 * frequency is passed through platform_data. On DT platforms,
	 * a clock must always be passed, even if there is no gatable
	 * clock associated to the SDIO interface (it can simply be a
	 * fixed rate clock).
	 */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "no clock associated\n");
		ret = -EINVAL;
		goto out;
	}
	clk_prepare_enable(host->clk);

	mmc->ops = &mvsd_ops;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
	mmc->f_max = MVSD_CLOCKRATE_MAX;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;

	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	host->base_clock = clk_get_rate(host->clk) / 2;
	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto out;
	if (maxfreq)
		mmc->f_max = maxfreq;

	spin_lock_init(&host->lock);

	host->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/* (Re-)program MBUS remapping windows if we are asked to. */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(host, dram);

	mvsd_power_down(host);

	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
		goto out;
	}

	setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
	platform_set_drvdata(pdev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
		dev_dbg(&pdev->dev, "using GPIO for card detection\n");
	else
		dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");

	return 0;

out:
	if (mmc) {
		if (!IS_ERR(host->clk))
			clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return ret;
}
static int mvsd_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	struct mvsd_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	del_timer_sync(&host->timer);
	mvsd_power_down(host);

	if (!IS_ERR(host->clk))
		clk_disable_unprepare(host->clk);
	mmc_free_host(mmc);

	return 0;
}
static const struct of_device_id mvsdio_dt_ids[] = {
	{ .compatible = "marvell,orion-sdio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);

static struct platform_driver mvsd_driver = {
	.probe		= mvsd_probe,
	.remove		= mvsd_remove,
	.driver		= {
		.name		= DRIVER_NAME,
		.of_match_table	= mvsdio_dt_ids,
	},
};

module_platform_driver(mvsd_driver);

/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);

/* force PIO transfers all the time */
module_param(nodma, int, 0);

MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");