/*
 * Marvell MMC/SD/SDIO driver
 *
 * Authors: Maen Suleiman, Nicolas Pitre
 * Copyright (C) 2008-2009 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

#include <asm/sizes.h>
#include <asm/unaligned.h>
#include <linux/platform_data/mmc-mvsdio.h>

#include "mvsdio.h"
#define DRIVER_NAME	"mvsdio"

static int maxfreq;
static int nodma;

struct mvsd_host {
	void __iomem *base;
	struct mmc_request *mrq;
	spinlock_t lock;
	unsigned int xfer_mode;
	unsigned int intr_en;
	unsigned int ctrl;
	unsigned int pio_size;
	void *pio_ptr;
	unsigned int sg_frags;
	unsigned int ns_per_clk;
	unsigned int clock;
	unsigned int base_clock;
	struct timer_list timer;
	struct mmc_host *mmc;
	struct device *dev;
	struct clk *clk;
};
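/*
 * Register accessors.  Both macros expect a local "iobase" variable in the
 * calling function that points at the controller's register window.
 */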
#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))
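/*
 * Program the controller for the data phase of a request: data timeout,
 * block count/size, and either a DMA mapping or a PIO fallback.  Returns
 * nonzero when the transfer has to be done by PIO.
 */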
static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
{
	void __iomem *iobase = host->base;
	unsigned int tmout;
	int tmout_index;

	/*
	 * Hardware weirdness.  The FIFO_EMPTY bit of the HW_STATE
	 * register is sometimes not set for a while when some
	 * "unusual" data block sizes are used (such as with the SWITCH
	 * command), even despite the fact that the XFER_DONE interrupt
	 * was raised.  And if another data transfer starts before
	 * this bit comes to good sense (which eventually happens by
	 * itself) then the new transfer simply fails with a timeout.
	 */
	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		unsigned long t = jiffies + HZ;
		unsigned int hw_state, count = 0;
		do {
			hw_state = mvsd_read(MVSD_HW_STATE);
			if (time_after(jiffies, t)) {
				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
				break;
			}
			count++;
		} while (!(hw_state & (1 << 13)));
		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
				   hw_state, count, jiffies - (t - HZ));
	}
	/* If timeout=0 then maximum timeout index is used. */
	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
	tmout += data->timeout_clks;
	tmout_index = fls(tmout - 1) - 12;
	if (tmout_index < 0)
		tmout_index = 0;
	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;

	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
		(data->flags & MMC_DATA_READ) ? "read" : "write",
		(u32)sg_virt(data->sg), data->blocks, data->blksz,
		tmout, tmout_index);

	host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
	host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
	mvsd_write(MVSD_HOST_CTRL, host->ctrl);
	mvsd_write(MVSD_BLK_COUNT, data->blocks);
	mvsd_write(MVSD_BLK_SIZE, data->blksz);
	if (nodma || (data->blksz | data->sg->offset) & 3 ||
	    ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
		/*
		 * We cannot do DMA on a buffer whose offset or size
		 * is not aligned on a 4-byte boundary.
		 *
		 * It also appears the host-to-card DMA can corrupt
		 * data when the buffer is not aligned on a 64-byte
		 * boundary.
		 */
		host->pio_size = data->blocks * data->blksz;
		host->pio_ptr = sg_virt(data->sg);
		if (!nodma)
			dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
				host->pio_ptr, host->pio_size);
		return 1;
	} else {
		dma_addr_t phys_addr;
		int dma_dir = (data->flags & MMC_DATA_READ) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;
		host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
					    data->sg_len, dma_dir);
		phys_addr = sg_dma_address(data->sg);
		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
		mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
		return 0;
	}
}
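/*
 * mmc_host_ops .request handler: encode the command register, set up any
 * data transfer, then enable the interrupts that will complete the request.
 */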
static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	struct mmc_command *cmd = mrq->cmd;
	u32 cmdreg = 0, xfer = 0, intr = 0;
	unsigned long flags;

	BUG_ON(host->mrq != NULL);
	host->mrq = mrq;

	dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
		cmd->opcode, mvsd_read(MVSD_HW_STATE));

	cmdreg = MVSD_CMD_INDEX(cmd->opcode);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= MVSD_CMD_RSP_48BUSY;
	else if (cmd->flags & MMC_RSP_136)
		cmdreg |= MVSD_CMD_RSP_136;
	else if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= MVSD_CMD_RSP_48;
	else
		cmdreg |= MVSD_CMD_RSP_NONE;

	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= MVSD_CMD_CHECK_CMDCRC;

	if (cmd->flags & MMC_RSP_OPCODE)
		cmdreg |= MVSD_CMD_INDX_CHECK;

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmdreg |= MVSD_UNEXPECTED_RESP;
		intr |= MVSD_NOR_UNEXP_RSP;
	}

	if (mrq->data) {
		struct mmc_data *data = mrq->data;
		int pio;

		cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
		xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
		if (data->flags & MMC_DATA_READ)
			xfer |= MVSD_XFER_MODE_TO_HOST;

		pio = mvsd_setup_data(host, data);
		if (pio) {
			xfer |= MVSD_XFER_MODE_PIO;
			/* PIO section of mvsd_irq has comments on those bits */
			if (data->flags & MMC_DATA_WRITE)
				intr |= MVSD_NOR_TX_AVAIL;
			else if (host->pio_size > 32)
				intr |= MVSD_NOR_RX_FIFO_8W;
			else
				intr |= MVSD_NOR_RX_READY;
		}

		if (data->stop) {
			struct mmc_command *stop = data->stop;
			u32 cmd12reg = 0;

			mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
			mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16);

			if (stop->flags & MMC_RSP_BUSY)
				cmd12reg |= MVSD_AUTOCMD12_BUSY;
			if (stop->flags & MMC_RSP_OPCODE)
				cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
			cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
			mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);

			xfer |= MVSD_XFER_MODE_AUTO_CMD12;
			intr |= MVSD_NOR_AUTOCMD12_DONE;
		} else {
			intr |= MVSD_NOR_XFER_DONE;
		}
	} else {
		intr |= MVSD_NOR_CMD_DONE;
	}

	mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
	mvsd_write(MVSD_ARG_HI, cmd->arg >> 16);

	spin_lock_irqsave(&host->lock, flags);

	host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
	host->xfer_mode |= xfer;
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

	mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_CMD, cmdreg);

	host->intr_en &= MVSD_NOR_CARD_INT;
	host->intr_en |= intr | MVSD_NOR_ERROR;
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	mvsd_write(MVSD_ERR_INTR_EN, 0xffff);

	mod_timer(&host->timer, jiffies + 5 * HZ);

	spin_unlock_irqrestore(&host->lock, flags);
}
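/*
 * Copy the response words out of the controller and translate command error
 * status bits into Linux error codes.  Returns the error bits that were not
 * consumed here.
 */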
static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
			   u32 err_status)
{
	void __iomem *iobase = host->base;

	if (cmd->flags & MMC_RSP_136) {
		unsigned int response[8], i;
		for (i = 0; i < 8; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] =	((response[0] & 0x03ff) << 22) |
				((response[1] & 0xffff) << 6) |
				((response[2] & 0xfc00) >> 10);
		cmd->resp[1] =	((response[2] & 0x03ff) << 22) |
				((response[3] & 0xffff) << 6) |
				((response[4] & 0xfc00) >> 10);
		cmd->resp[2] =	((response[4] & 0x03ff) << 22) |
				((response[5] & 0xffff) << 6) |
				((response[6] & 0xfc00) >> 10);
		cmd->resp[3] =	((response[6] & 0x03ff) << 22) |
				((response[7] & 0x3fff) << 8);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] =	((response[2] & 0x003f) << (8 - 8)) |
				((response[1] & 0xffff) << (14 - 8)) |
				((response[0] & 0x03ff) << (30 - 8));
		cmd->resp[1] =	((response[0] & 0xfc00) >> 10);
		cmd->resp[2] = 0;
		cmd->resp[3] = 0;
	}

	if (err_status & MVSD_ERR_CMD_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
				 MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
		cmd->error = -EILSEQ;
	}
	err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
			MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
			MVSD_ERR_CMD_STARTBIT);

	return err_status;
}
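/*
 * Tear down the data phase: release the DMA mapping (or the PIO state),
 * record how many bytes actually transferred, and pick up the auto CMD12
 * response and error status when a stop command was used.
 */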
static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
			    u32 err_status)
{
	void __iomem *iobase = host->base;

	if (host->pio_ptr) {
		host->pio_ptr = NULL;
		host->pio_size = 0;
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
			     (data->flags & MMC_DATA_READ) ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

	if (err_status & MVSD_ERR_DATA_TIMEOUT)
		data->error = -ETIMEDOUT;
	else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
		data->error = -EILSEQ;
	else if (err_status & MVSD_ERR_XFER_SIZE)
		data->error = -EBADE;
	err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
			MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);

	dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
		mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
	data->bytes_xfered =
		(data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
	/* We can't be sure about the last block when errors are detected */
	if (data->bytes_xfered && data->error)
		data->bytes_xfered -= data->blksz;

	/* Handle Auto cmd 12 response */
	if (data->stop) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_AUTO_RSP(i));
		data->stop->resp[0] =	((response[2] & 0x003f) << (8 - 8)) |
					((response[1] & 0xffff) << (14 - 8)) |
					((response[0] & 0x03ff) << (30 - 8));
		data->stop->resp[1] =	((response[0] & 0xfc00) >> 10);
		data->stop->resp[2] = 0;
		data->stop->resp[3] = 0;

		if (err_status & MVSD_ERR_AUTOCMD12) {
			u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
			dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
			if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
				data->stop->error = -ENOEXEC;
			else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
				data->stop->error = -ETIMEDOUT;
			else if (err_cmd12)
				data->stop->error = -EILSEQ;
			err_status &= ~MVSD_ERR_AUTOCMD12;
		}
	}

	return err_status;
}
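/*
 * Interrupt handler.  Moves PIO data through the FIFO when DMA is not in
 * use, completes the pending request once the done/error bits assert, and
 * forwards card interrupts to the SDIO core.
 */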
static irqreturn_t mvsd_irq(int irq, void *dev)
{
	struct mvsd_host *host = dev;
	void __iomem *iobase = host->base;
	u32 intr_status, intr_done_mask;
	int irq_handled = 0;

	intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
	dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
		intr_status, mvsd_read(MVSD_NOR_INTR_EN),
		mvsd_read(MVSD_HW_STATE));

	/*
	 * It looks like the SDIO IP can issue one late, spurious irq
	 * even though all irqs should be disabled.  To work around this,
	 * bail out early if we didn't expect any irqs to occur.
	 */
	if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
		dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN),
			mvsd_read(MVSD_ERR_INTR_STATUS),
			mvsd_read(MVSD_ERR_INTR_EN));
		return IRQ_HANDLED;
	}

	spin_lock(&host->lock);

	/* PIO handling, if needed. Messy business... */
	if (host->pio_size &&
	    (intr_status & host->intr_en &
	     (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
			readsw(iobase + MVSD_FIFO, p, 16);
			p += 16;
			s -= 32;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		/*
		 * Normally we'd use < 32 here, but the RX_FIFO_8W bit
		 * doesn't appear to assert when there is exactly 32 bytes
		 * (8 words) left to fetch in a transfer.
		 */
		if (s <= 32) {
			while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				put_unaligned(mvsd_read(MVSD_FIFO), p++);
				s -= 4;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
				u16 val[2] = {0, 0};
				val[0] = mvsd_read(MVSD_FIFO);
				val[1] = mvsd_read(MVSD_FIFO);
				memcpy(p, ((void *)&val) + 4 - s, s);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			} else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
				host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
				host->intr_en |= MVSD_NOR_RX_READY;
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	} else if (host->pio_size &&
		   (intr_status & host->intr_en &
		    (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
		u16 *p = host->pio_ptr;
		int s = host->pio_size;
		/*
		 * The TX_FIFO_8W bit is unreliable.  When set, bursting
		 * 16 halfwords all at once into the FIFO drops data.  Actually
		 * TX_AVAIL does go off after only one word is pushed even if
		 * TX_FIFO_8W remains set.
		 */
		while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			mvsd_write(MVSD_FIFO, get_unaligned(p++));
			s -= 4;
			intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
		}
		if (s < 4) {
			if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
				u16 val[2] = {0, 0};
				memcpy(((void *)&val) + 4 - s, p, s);
				mvsd_write(MVSD_FIFO, val[0]);
				mvsd_write(MVSD_FIFO, val[1]);
				s = 0;
				intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
			}
			if (s == 0) {
				host->intr_en &=
				     ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
				mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
			}
		}
		dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
			s, intr_status, mvsd_read(MVSD_HW_STATE));
		host->pio_ptr = p;
		host->pio_size = s;
		irq_handled = 1;
	}

	mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);

	intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
			 MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
	if (intr_status & host->intr_en & ~intr_done_mask) {
		struct mmc_request *mrq = host->mrq;
		struct mmc_command *cmd = mrq->cmd;
		u32 err_status = 0;

		del_timer(&host->timer);
		host->mrq = NULL;

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);

		spin_unlock(&host->lock);

		if (intr_status & MVSD_NOR_UNEXP_RSP) {
			cmd->error = -EPROTO;
		} else if (intr_status & MVSD_NOR_ERROR) {
			err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
			dev_dbg(host->dev, "err 0x%04x\n", err_status);
		}

		err_status = mvsd_finish_cmd(host, cmd, err_status);
		if (mrq->data)
			err_status = mvsd_finish_data(host, mrq->data, err_status);
		if (err_status) {
			dev_err(host->dev, "unhandled error status %#04x\n",
				err_status);
			cmd->error = -ENOMSG;
		}

		mmc_request_done(host->mmc, mrq);
		irq_handled = 1;
	} else
		spin_unlock(&host->lock);

	if (intr_status & MVSD_NOR_CARD_INT) {
		mmc_signal_sdio_irq(host->mmc);
		irq_handled = 1;
	}

	if (irq_handled)
		return IRQ_HANDLED;

	dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
		intr_status, host->intr_en, host->pio_size);

	return IRQ_NONE;
}
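/*
 * Software watchdog: if the controller never raised the expected interrupt,
 * reset it and fail the stuck request with -ETIMEDOUT.
 */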
static void mvsd_timeout_timer(unsigned long data)
{
	struct mvsd_host *host = (struct mvsd_host *)data;
	void __iomem *iobase = host->base;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;
	if (mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
		dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
			mvsd_read(MVSD_HW_STATE),
			mvsd_read(MVSD_NOR_INTR_STATUS),
			mvsd_read(MVSD_NOR_INTR_EN));

		host->mrq = NULL;

		mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);

		host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
		mvsd_write(MVSD_XFER_MODE, host->xfer_mode);

		host->intr_en &= MVSD_NOR_CARD_INT;
		mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
		mvsd_write(MVSD_ERR_INTR_EN, 0);
		mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);

		mrq->cmd->error = -ETIMEDOUT;
		mvsd_finish_cmd(host, mrq->cmd, 0);
		if (mrq->data) {
			mrq->data->error = -ETIMEDOUT;
			mvsd_finish_data(host, mrq->data, 0);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}
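/*
 * Enable or disable forwarding of the card interrupt line to the SDIO core.
 */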
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enable) {
		host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en |= MVSD_NOR_CARD_INT;
	} else {
		host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
		host->intr_en &= ~MVSD_NOR_CARD_INT;
	}
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	spin_unlock_irqrestore(&host->lock, flags);
}
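/*
 * Bring the controller to a known state.  mvsd_power_up() resets it with
 * status reporting enabled; mvsd_power_down() resets it with the clock
 * stopped and status reporting disabled.
 */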
static void mvsd_power_up(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power up\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, 0);
	mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
static void mvsd_power_down(struct mvsd_host *host)
{
	void __iomem *iobase = host->base;
	dev_dbg(host->dev, "power down\n");
	mvsd_write(MVSD_NOR_INTR_EN, 0);
	mvsd_write(MVSD_ERR_INTR_EN, 0);
	mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
	mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
	mvsd_write(MVSD_NOR_STATUS_EN, 0);
	mvsd_write(MVSD_ERR_STATUS_EN, 0);
	mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
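/*
 * mmc_host_ops .set_ios handler: program the clock divider, bus mode,
 * bus width and timeout, and handle power up/down transitions.
 */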
static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	u32 ctrl_reg = 0;

	if (ios->power_mode == MMC_POWER_UP)
		mvsd_power_up(host);

	if (ios->clock == 0) {
		mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
		mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
		host->clock = 0;
		dev_dbg(host->dev, "clock off\n");
	} else if (ios->clock != host->clock) {
		u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
		if (m > MVSD_BASE_DIV_MAX)
			m = MVSD_BASE_DIV_MAX;
		mvsd_write(MVSD_CLK_DIV, m);
		host->clock = ios->clock;
		host->ns_per_clk = 1000000000 / (host->base_clock / (m + 1));
		dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
			ios->clock, host->base_clock / (m + 1), m);
	}

	/* default transfer mode */
	ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
	ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;

	/* default to maximum timeout */
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
	ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;

	if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
		ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;

	/*
	 * The HI_SPEED_EN bit is causing trouble with many (but not all)
	 * high speed SD, SDHC and SDIO cards.  Not enabling that bit
	 * makes all cards work.  So let's just ignore that bit for now
	 * and revisit this issue if problems for not enabling this bit
	 * are ever reported.
	 */
#if 0
	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
#endif

	host->ctrl = ctrl_reg;
	mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
	dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
		(ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
			"push-pull" : "open-drain",
		(ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
			"4bit-width" : "1bit-width",
		(ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
			"high-speed" : "");

	if (ios->power_mode == MMC_POWER_OFF)
		mvsd_power_down(host);
}
static const struct mmc_host_ops mvsd_ops = {
	.request		= mvsd_request,
	.get_ro			= mmc_gpio_get_ro,
	.set_ios		= mvsd_set_ios,
	.enable_sdio_irq	= mvsd_enable_sdio_irq,
};
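/*
 * Program the controller's MBUS address decoding windows so that its DMA
 * engine can reach each DRAM chip-select.
 */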
static void
mv_conf_mbus_windows(struct mvsd_host *host,
		     const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = host->base;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + MVSD_WINDOW_CTRL(i));
		writel(0, iobase + MVSD_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + MVSD_WINDOW_CTRL(i));
		writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
	}
}
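/*
 * Probe: map the registers, pick up the clock and board/DT configuration,
 * then register the interrupt handler and the mmc_host.
 */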
static int mvsd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mmc_host *mmc = NULL;
	struct mvsd_host *host = NULL;
	const struct mbus_dram_target_info *dram;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;

	/*
	 * Some non-DT platforms do not pass a clock, and the clock
	 * frequency is passed through platform_data. On DT platforms,
	 * a clock must always be passed, even if there is no gatable
	 * clock associated to the SDIO interface (it can simply be a
	 * fixed rate clock).
	 */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(host->clk))
		clk_prepare_enable(host->clk);

	mmc->ops = &mvsd_ops;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
	mmc->f_max = MVSD_CLOCKRATE_MAX;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;

	mmc->max_segs = 1;
	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	if (np) {
		if (IS_ERR(host->clk)) {
			dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
			ret = -EINVAL;
			goto out;
		}

		host->base_clock = clk_get_rate(host->clk) / 2;
		ret = mmc_of_parse(mmc);
		if (ret < 0)
			goto out;
	} else {
		const struct mvsdio_platform_data *mvsd_data;

		mvsd_data = pdev->dev.platform_data;
		if (!mvsd_data) {
			ret = -ENXIO;
			goto out;
		}
		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ |
			    MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
		host->base_clock = mvsd_data->clock / 2;
		/* GPIO 0 regarded as invalid for backward compatibility */
		if (mvsd_data->gpio_card_detect &&
		    gpio_is_valid(mvsd_data->gpio_card_detect)) {
			ret = mmc_gpio_request_cd(mmc,
						  mvsd_data->gpio_card_detect,
						  0);
			if (ret)
				goto out;
		} else {
			mmc->caps |= MMC_CAP_NEEDS_POLL;
		}

		if (mvsd_data->gpio_write_protect &&
		    gpio_is_valid(mvsd_data->gpio_write_protect))
			mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect);
	}

	if (maxfreq)
		mmc->f_max = maxfreq;

	spin_lock_init(&host->lock);

	host->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/* (Re-)program MBUS remapping windows if we are asked to. */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(host, dram);

	mvsd_power_down(host);

	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
		goto out;
	}

	setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
	platform_set_drvdata(pdev, mmc);
	ret = mmc_add_host(mmc);
	if (ret)
		goto out;

	if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
		dev_dbg(&pdev->dev, "using GPIO for card detection\n");
	else
		dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");

	return 0;

out:
	if (mmc) {
		if (!IS_ERR(host->clk))
			clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return ret;
}
static int mvsd_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mvsd_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	del_timer_sync(&host->timer);
	mvsd_power_down(host);

	if (!IS_ERR(host->clk))
		clk_disable_unprepare(host->clk);
	mmc_free_host(mmc);

	return 0;
}
static const struct of_device_id mvsdio_dt_ids[] = {
	{ .compatible = "marvell,orion-sdio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);

static struct platform_driver mvsd_driver = {
	.probe		= mvsd_probe,
	.remove		= mvsd_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = mvsdio_dt_ids,
	},
};

module_platform_driver(mvsd_driver);

/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);

/* force PIO transfers all the time */
module_param(nodma, int, 0);

MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");