// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell MMC/SD/SDIO driver
 *
 * Authors: Maen Suleiman, Nicolas Pitre
 * Copyright (C) 2008-2009 Marvell Ltd.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

#include <linux/sizes.h>
#include <asm/unaligned.h>

#include "mvsdio.h"
#define DRIVER_NAME	"mvsdio"

static int maxfreq;
static int nodma;

struct mvsd_host {
	void __iomem *base;
	struct mmc_request *mrq;
	spinlock_t lock;
	unsigned int xfer_mode;
	unsigned int intr_en;
	unsigned int ctrl;
	unsigned int pio_size;
	void *pio_ptr;
	unsigned int sg_frags;
	unsigned int ns_per_clk;
	unsigned int clock;
	unsigned int base_clock;
	struct timer_list timer;
	struct mmc_host *mmc;
	struct device *dev;
	struct clk *clk;
};
#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))
static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
{
	void __iomem *iobase = host->base;
	unsigned int tmout;
	int tmout_index;

	/*
	 * Hardware weirdness.  The FIFO_EMPTY bit of the HW_STATE
	 * register sometimes is not set for a while after XFER_DONE has
	 * already been signalled, when "unusual" data block sizes are
	 * used (such as with the SWITCH command).  If another data
	 * transfer starts before this bit comes to its senses (which
	 * eventually happens by itself) then the new transfer simply
	 * fails with a timeout.
	 */
	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		unsigned long t = jiffies + HZ;
		unsigned int hw_state, count = 0;
		do {
			hw_state = mvsd_read(MVSD_HW_STATE);
			if (time_after(jiffies, t)) {
				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
				break;
			}
			count++;
		} while (!(hw_state & (1 << 13)));
		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
				   hw_state, count, jiffies - (t - HZ));
	}

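	/*
	 * The controller's data timeout is programmed as a small index in
	 * HOST_CTRL rather than a raw clock count; the fls() computation
	 * below converts the timeout requested by the MMC core (timeout_ns
	 * expressed in clocks, plus timeout_clks) into the smallest index
	 * that covers it.
	 */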
	/* If timeout=0 then maximum timeout index is used. */
	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
	tmout += data->timeout_clks;
	tmout_index = fls(tmout - 1) - 12;
	if (tmout_index < 0)
		tmout_index = 0;
	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;

	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
		(data->flags & MMC_DATA_READ) ? "read" : "write",
		(u32)sg_virt(data->sg), data->blocks, data->blksz,
		tmout, tmout_index);

	host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
	host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
	mvsd_write(MVSD_HOST_CTRL, host->ctrl);
	mvsd_write(MVSD_BLK_COUNT, data->blocks);
	mvsd_write(MVSD_BLK_SIZE, data->blksz);

	if (nodma || (data->blksz | data->sg->offset) & 3 ||
	    ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
		/*
		 * We cannot do DMA on a buffer whose offset or size
		 * is not aligned on a 4-byte boundary.
		 *
		 * It also appears that host-to-card DMA can corrupt
		 * data when the buffer is not aligned on a 64-byte
		 * boundary.
		 */
		host->pio_size = data->blocks * data->blksz;
		host->pio_ptr = sg_virt(data->sg);
		if (!nodma)
			dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
				host->pio_ptr, host->pio_size);
		return 1;
	} else {
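		/*
		 * DMA path: map the scatterlist and hand the first segment's
		 * bus address to the controller, split across the low and
		 * high 16-bit halves of the SYS_ADDR registers.
		 */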
		dma_addr_t phys_addr;

		host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
					    data->sg, data->sg_len,
					    mmc_get_dma_dir(data));
		phys_addr = sg_dma_address(data->sg);
		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
		mvsd_write(MVSD_SYS_ADDR_HI,  (u32)phys_addr >> 16);
		return 0;
	}
}

static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	struct mmc_command *cmd = mrq->cmd;
	u32 cmdreg = 0, xfer = 0, intr = 0;
	unsigned long flags;
	unsigned int timeout;

	BUG_ON(host->mrq != NULL);
	host->mrq = mrq;

	dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
		cmd->opcode, mvsd_read(MVSD_HW_STATE));

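	/*
	 * Translate the response type expected by the MMC core into the
	 * controller's command register flags, and enable CRC and command
	 * index checking when the response format carries them.
	 */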
	cmdreg = MVSD_CMD_INDEX(cmd->opcode);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= MVSD_CMD_RSP_48BUSY;
	else if (cmd->flags & MMC_RSP_136)
		cmdreg |= MVSD_CMD_RSP_136;
	else if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= MVSD_CMD_RSP_48;
	else
		cmdreg |= MVSD_CMD_RSP_NONE;

	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= MVSD_CMD_CHECK_CMDCRC;

	if (cmd->flags & MMC_RSP_OPCODE)
		cmdreg |= MVSD_CMD_INDX_CHECK;

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmdreg |= MVSD_UNEXPECTED_RESP;
		intr |= MVSD_NOR_UNEXP_RSP;
	}

	if (mrq->data) {
		struct mmc_data *data = mrq->data;
		int pio;

		cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
		xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
		if (data->flags & MMC_DATA_READ)
			xfer |= MVSD_XFER_MODE_TO_HOST;

		pio = mvsd_setup_data(host, data);
		if (pio) {
			xfer |= MVSD_XFER_MODE_PIO;
			/* PIO section of mvsd_irq has comments on those bits */
			if (data->flags & MMC_DATA_WRITE)
				intr |= MVSD_NOR_TX_AVAIL;
			else if (host->pio_size > 32)
				intr |= MVSD_NOR_RX_FIFO_8W;
			else
				intr |= MVSD_NOR_RX_READY;
		}

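		/*
		 * If the request carries a stop command, let the controller
		 * issue it as an automatic CMD12 at the end of the transfer
		 * and wait for AUTOCMD12_DONE instead of XFER_DONE.
		 */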
		if (data->stop) {
			struct mmc_command *stop = data->stop;
			u32 cmd12reg = 0;

			mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
			mvsd_write(MVSD_AUTOCMD12_ARG_HI,  stop->arg >> 16);

			if (stop->flags & MMC_RSP_BUSY)
				cmd12reg |= MVSD_AUTOCMD12_BUSY;
			if (stop->flags & MMC_RSP_OPCODE)
				cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
			cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
			mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);

			xfer |= MVSD_XFER_MODE_AUTO_CMD12;
			intr |= MVSD_NOR_AUTOCMD12_DONE;
		} else {
			intr |= MVSD_NOR_XFER_DONE;
		}
	} else {
		intr |= MVSD_NOR_CMD_DONE;
	}

	mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
	mvsd_write(MVSD_ARG_HI,  cmd->arg >> 16);

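	/*
	 * Program the transfer mode, clear stale interrupt status (leaving
	 * the SDIO card interrupt bit alone), start the command, and arm a
	 * software timeout, all under the host lock so the irq handler sees
	 * a consistent state.
	 */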
	spin_lock_irqsave(&host->lock, flags);

	host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
	host->xfer_mode |= xfer;
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

	mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_CMD, cmdreg);

	host->intr_en &= MVSD_NOR_CARD_INT;
	host->intr_en |= intr | MVSD_NOR_ERROR;
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	mvsd_write(MVSD_ERR_INTR_EN, 0xffff);

	timeout = cmd->busy_timeout ? cmd->busy_timeout : 5000;
	mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));

	spin_unlock_irqrestore(&host->lock, flags);
}

static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
			   u32 err_status)
{
	void __iomem *iobase = host->base;

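	/*
	 * The hardware exposes the response in 16-bit RSP registers;
	 * reassemble those chunks into the 32-bit resp[] words that the
	 * MMC core expects.
	 */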
	if (cmd->flags & MMC_RSP_136) {
		unsigned int response[8], i;
		for (i = 0; i < 8; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[0] & 0x03ff) << 22) |
			       ((response[1] & 0xffff) << 6) |
			       ((response[2] & 0xfc00) >> 10);
		cmd->resp[1] = ((response[2] & 0x03ff) << 22) |
			       ((response[3] & 0xffff) << 6) |
			       ((response[4] & 0xfc00) >> 10);
		cmd->resp[2] = ((response[4] & 0x03ff) << 22) |
			       ((response[5] & 0xffff) << 6) |
			       ((response[6] & 0xfc00) >> 10);
		cmd->resp[3] = ((response[6] & 0x03ff) << 22) |
			       ((response[7] & 0x3fff) << 8);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
			       ((response[1] & 0xffff) << (14 - 8)) |
			       ((response[0] & 0x03ff) << (30 - 8));
		cmd->resp[1] = ((response[0] & 0xfc00) >> 10);
		cmd->resp[2] = 0;
		cmd->resp[3] = 0;
	}

	if (err_status & MVSD_ERR_CMD_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
				 MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
		cmd->error = -EILSEQ;
	}
	err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
			MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
			MVSD_ERR_CMD_STARTBIT);

	return err_status;
}

static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
			    u32 err_status)
{
	void __iomem *iobase = host->base;

	if (host->pio_ptr) {
		host->pio_ptr = NULL;
		host->pio_size = 0;
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
			     mmc_get_dma_dir(data));
	}

	if (err_status & MVSD_ERR_DATA_TIMEOUT)
		data->error = -ETIMEDOUT;
	else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
		data->error = -EILSEQ;
	else if (err_status & MVSD_ERR_XFER_SIZE)
		data->error = -EBADE;
	err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
			MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);

	dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
		mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
	data->bytes_xfered =
		(data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
	/* We can't be sure about the last block when errors are detected */
	if (data->bytes_xfered && data->error)
		data->bytes_xfered -= data->blksz;

	/* Handle Auto cmd 12 response */
	if (data->stop) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_AUTO_RSP(i));
		data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
				      ((response[1] & 0xffff) << (14 - 8)) |
				      ((response[0] & 0x03ff) << (30 - 8));
		data->stop->resp[1] = ((response[0] & 0xfc00) >> 10);
		data->stop->resp[2] = 0;
		data->stop->resp[3] = 0;

		if (err_status & MVSD_ERR_AUTOCMD12) {
			u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
			dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
			if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
				data->stop->error = -ENOEXEC;
			else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
				data->stop->error = -ETIMEDOUT;
			else if (err_cmd12)
				data->stop->error = -EILSEQ;
			err_status &= ~MVSD_ERR_AUTOCMD12;
		}
	}

	return err_status;
}

static irqreturn_t mvsd_irq(int irq, void *dev)
{
	struct mvsd_host *host = dev;
	void __iomem *iobase = host->base;
	u32 intr_status, intr_done_mask;
	int irq_handled = 0;

	intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
	dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
		intr_status, mvsd_read(MVSD_NOR_INTR_EN),
		mvsd_read(MVSD_HW_STATE));

	/*
	 * It looks like the SDIO IP can issue one late, spurious irq
	 * even though all irqs should be disabled. To work around this,
	 * bail out early if we didn't expect any irqs to occur.
	 */
	if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
		dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN),
			mvsd_read(MVSD_ERR_INTR_STATUS),
			mvsd_read(MVSD_ERR_INTR_EN));
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	/* PIO handling, if needed. Messy business... */
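	/*
	 * Reads drain the FIFO sixteen halfwords at a time while RX_FIFO_8W
	 * is set, then two halfwords at a time on RX_READY, and finally pull
	 * one last word and keep only the trailing bytes that belong to the
	 * buffer.
	 */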
	if (host->pio_size &&
	    (intr_status & host->intr_en &
	     (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
			readsw(iobase + MVSD_FIFO, p, 16);
			p += 16;
			s -= 32;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		/*
		 * Normally we'd use < 32 here, but the RX_FIFO_8W bit
		 * doesn't appear to assert when there are exactly 32 bytes
		 * (8 words) left to fetch in a transfer.
		 */
		if (s <= 32) {
			while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				s -= 4;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
				u16 val[2] = {0, 0};
				val[0] = mvsd_read(MVSD_FIFO);
				val[1] = mvsd_read(MVSD_FIFO);
				memcpy(p, ((void *)&val) + 4 - s, s);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			} else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
				host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
				host->intr_en |= MVSD_NOR_RX_READY;
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	} else if (host->pio_size &&
		   (intr_status & host->intr_en &
		    (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		/*
		 * The TX_FIFO_8W bit is unreliable. When set, bursting
		 * 16 halfwords all at once into the FIFO drops data. And
		 * TX_AVAIL does go off after only one word is pushed, even
		 * if TX_FIFO_8W remains set.
		 */
		while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			s -= 4;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s < 4) {
			if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
				u16 val[2] = {0, 0};
				memcpy(((void *)&val) + 4 - s, p, s);
				mvsd_write(MVSD_FIFO, val[0]);
				mvsd_write(MVSD_FIFO, val[1]);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	}

	mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);

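	/*
	 * Any remaining enabled interrupt outside intr_done_mask signals the
	 * end of the request (XFER_DONE, CMD_DONE, AUTOCMD12_DONE or an
	 * error condition): complete it below.
	 */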
	intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
			 MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
	if (intr_status & host->intr_en & ~intr_done_mask) {
		struct mmc_request *mrq = host->mrq;
		struct mmc_command *cmd = mrq->cmd;
		u32 err_status = 0;

		del_timer(&host->timer);
		host->mrq = NULL;

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);

		spin_unlock(&host->lock);

		if (intr_status & MVSD_NOR_UNEXP_RSP) {
			cmd->error = -EPROTO;
		} else if (intr_status & MVSD_NOR_ERROR) {
			err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
			dev_dbg(host->dev, "err 0x%04x\n", err_status);
		}

		err_status = mvsd_finish_cmd(host, cmd, err_status);
		if (mrq->data)
			err_status = mvsd_finish_data(host, mrq->data, err_status);
		if (err_status) {
			dev_err(host->dev, "unhandled error status %#04x\n",
				err_status);
			cmd->error = -ENOMSG;
		}

		mmc_request_done(host->mmc, mrq);
		irq_handled = 1;
	} else
		spin_unlock(&host->lock);

	if (intr_status & MVSD_NOR_CARD_INT) {
		mmc_signal_sdio_irq(host->mmc);
		irq_handled = 1;
	}

	if (irq_handled)
		return IRQ_HANDLED;

	dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
		intr_status, host->intr_en, host->pio_size);
	return IRQ_NONE;
}

static void mvsd_timeout_timer(struct timer_list *t)
{
	struct mvsd_host *host = from_timer(host, t, timer);
	void __iomem *iobase = host->base;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	if (mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
		dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
			mvsd_read(MVSD_HW_STATE),
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN));

		host->mrq = NULL;

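		/*
		 * The hardware never signalled completion: soft-reset the
		 * controller, mask everything but the card interrupt, and
		 * fail the request with -ETIMEDOUT.
		 */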
		mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);

		host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
		mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);
		mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);

		mrq->cmd->error = -ETIMEDOUT;
		mvsd_finish_cmd(host, mrq->cmd, 0);
		if (mrq->data) {
			mrq->data->error = -ETIMEDOUT;
			mvsd_finish_data(host, mrq->data, 0);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}

static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable) {
		host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en |= MVSD_NOR_CARD_INT;
	} else {
		host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en &= ~MVSD_NOR_CARD_INT;
	}
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void mvsd_power_up(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power up\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, 0);
	mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}

static void mvsd_power_down(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power down\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
	mvsd_write(MVSD_NOR_STATUS_EN, 0);
	mvsd_write(MVSD_ERR_STATUS_EN, 0);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}

static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	u32 ctrl_reg = 0;

	if (ios->power_mode == MMC_POWER_UP)
		mvsd_power_up(host);

	if (ios->clock == 0) {
		mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
		mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
		host->clock = 0;
		dev_dbg(host->dev, "clock off\n");
	} else if (ios->clock != host->clock) {
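		/*
		 * The divider register gives base_clock / (m + 1); pick the
		 * smallest divisor that stays at or below the requested rate,
		 * clamped to the largest value the field can hold.
		 */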
		u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
		if (m > MVSD_BASE_DIV_MAX)
			m = MVSD_BASE_DIV_MAX;
		mvsd_write(MVSD_CLK_DIV, m);
		host->clock = ios->clock;
		host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
		dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
			ios->clock, host->base_clock / (m+1), m);
	}

	/* default transfer mode */
	ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
	ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;

	/* default to maximum timeout */
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;

	if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
		ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;

	/*
	 * The HI_SPEED_EN bit causes trouble with many (but not all)
	 * high-speed SD, SDHC and SDIO cards.  Not enabling that bit
	 * makes all cards work, so just ignore it for now and revisit
	 * this issue if problems from not enabling it are ever reported.
	 */
#if 0
	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
#endif

	host->ctrl = ctrl_reg;
	mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
	dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
		(ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
			"push-pull" : "open-drain",
		(ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
			"4bit-width" : "1bit-width",
		(ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
			"high-speed" : "");

	if (ios->power_mode == MMC_POWER_OFF)
		mvsd_power_down(host);
}

static const struct mmc_host_ops mvsd_ops = {
	.request		= mvsd_request,
	.get_ro			= mmc_gpio_get_ro,
	.set_ios		= mvsd_set_ios,
	.enable_sdio_irq	= mvsd_enable_sdio_irq,
};

static void
mv_conf_mbus_windows(struct mvsd_host *host,
		     const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = host->base;
	int i;

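	/*
	 * Clear all four address decoding windows, then open one window per
	 * DRAM chip select so the controller's DMA can reach system memory.
	 */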
	for (i = 0; i < 4; i++) {
		writel(0, iobase + MVSD_WINDOW_CTRL(i));
		writel(0, iobase + MVSD_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + MVSD_WINDOW_CTRL(i));
		writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
	}
}

static int mvsd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mmc_host *mmc = NULL;
	struct mvsd_host *host = NULL;
	const struct mbus_dram_target_info *dram;
	int ret, irq;

	if (!np) {
		dev_err(&pdev->dev, "no DT node\n");
		return -ENODEV;
	}
= platform_get_irq(pdev
, 0);
709 mmc
= mmc_alloc_host(sizeof(struct mvsd_host
), &pdev
->dev
);
715 host
= mmc_priv(mmc
);
717 host
->dev
= &pdev
->dev
;
	/*
	 * Some non-DT platforms do not pass a clock, and the clock
	 * frequency is passed through platform_data. On DT platforms,
	 * a clock must always be passed, even if there is no gateable
	 * clock associated with the SDIO interface (it can simply be a
	 * fixed-rate clock).
	 */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "no clock associated\n");
		ret = -EINVAL;
		goto out;
	}
	clk_prepare_enable(host->clk);

	mmc->ops = &mvsd_ops;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
	mmc->f_max = MVSD_CLOCKRATE_MAX;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;

	mmc->max_segs = 1;
	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	host->base_clock = clk_get_rate(host->clk) / 2;
	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto out;
	if (maxfreq)
		mmc->f_max = maxfreq;

	spin_lock_init(&host->lock);

	host->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/* (Re-)program MBUS remapping windows if we are asked to. */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(host, dram);

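	/* Start powered down and quiesced until the core calls set_ios. */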
	mvsd_power_down(host);

	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
		goto out;
	}

	timer_setup(&host->timer, mvsd_timeout_timer, 0);
	platform_set_drvdata(pdev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
		dev_dbg(&pdev->dev, "using GPIO for card detection\n");
	else
		dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");

	return 0;

out:
	if (mmc) {
		if (!IS_ERR(host->clk))
			clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return ret;
}

static int mvsd_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mvsd_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	del_timer_sync(&host->timer);
	mvsd_power_down(host);

	if (!IS_ERR(host->clk))
		clk_disable_unprepare(host->clk);
	mmc_free_host(mmc);

	return 0;
}

static const struct of_device_id mvsdio_dt_ids[] = {
	{ .compatible = "marvell,orion-sdio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);

static struct platform_driver mvsd_driver = {
	.probe		= mvsd_probe,
	.remove		= mvsd_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = mvsdio_dt_ids,
	},
};

module_platform_driver(mvsd_driver);

/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);

/* force PIO transfers all the time */
module_param(nodma, int, 0);

MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");