x86/xen: resume timer irqs early
[linux/fpc-iii.git] / drivers / mmc / host / wmt-sdmmc.c
blob34231d5168fcf12dfe804d2a26b3cda2c6647976
1 /*
2 * WM8505/WM8650 SD/MMC Host Controller
4 * Copyright (C) 2010 Tony Prisk
5 * Copyright (C) 2008 WonderMedia Technologies, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/platform_device.h>
15 #include <linux/ioport.h>
16 #include <linux/errno.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/delay.h>
19 #include <linux/io.h>
20 #include <linux/irq.h>
21 #include <linux/clk.h>
22 #include <linux/gpio.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_device.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/mmc.h>
31 #include <linux/mmc/sd.h>
33 #include <asm/byteorder.h>
36 #define DRIVER_NAME "wmt-sdhc"
39 /* MMC/SD controller registers */
40 #define SDMMC_CTLR 0x00
41 #define SDMMC_CMD 0x01
42 #define SDMMC_RSPTYPE 0x02
43 #define SDMMC_ARG 0x04
44 #define SDMMC_BUSMODE 0x08
45 #define SDMMC_BLKLEN 0x0C
46 #define SDMMC_BLKCNT 0x0E
47 #define SDMMC_RSP 0x10
48 #define SDMMC_CBCR 0x20
49 #define SDMMC_INTMASK0 0x24
50 #define SDMMC_INTMASK1 0x25
51 #define SDMMC_STS0 0x28
52 #define SDMMC_STS1 0x29
53 #define SDMMC_STS2 0x2A
54 #define SDMMC_STS3 0x2B
55 #define SDMMC_RSPTIMEOUT 0x2C
56 #define SDMMC_CLK 0x30 /* VT8500 only */
57 #define SDMMC_EXTCTRL 0x34
58 #define SDMMC_SBLKLEN 0x38
59 #define SDMMC_DMATIMEOUT 0x3C
62 /* SDMMC_CTLR bit fields */
63 #define CTLR_CMD_START 0x01
64 #define CTLR_CMD_WRITE 0x04
65 #define CTLR_FIFO_RESET 0x08
67 /* SDMMC_BUSMODE bit fields */
68 #define BM_SPI_MODE 0x01
69 #define BM_FOURBIT_MODE 0x02
70 #define BM_EIGHTBIT_MODE 0x04
71 #define BM_SD_OFF 0x10
72 #define BM_SPI_CS 0x20
73 #define BM_SD_POWER 0x40
74 #define BM_SOFT_RESET 0x80
75 #define BM_ONEBIT_MASK 0xFD
77 /* SDMMC_BLKLEN bit fields */
78 #define BLKL_CRCERR_ABORT 0x0800
79 #define BLKL_CD_POL_HIGH 0x1000
80 #define BLKL_GPI_CD 0x2000
81 #define BLKL_DATA3_CD 0x4000
82 #define BLKL_INT_ENABLE 0x8000
84 /* SDMMC_INTMASK0 bit fields */
85 #define INT0_MBLK_TRAN_DONE_INT_EN 0x10
86 #define INT0_BLK_TRAN_DONE_INT_EN 0x20
87 #define INT0_CD_INT_EN 0x40
88 #define INT0_DI_INT_EN 0x80
90 /* SDMMC_INTMASK1 bit fields */
91 #define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02
92 #define INT1_CMD_RES_TOUT_INT_EN 0x04
93 #define INT1_MBLK_AUTO_STOP_INT_EN 0x08
94 #define INT1_DATA_TOUT_INT_EN 0x10
95 #define INT1_RESCRC_ERR_INT_EN 0x20
96 #define INT1_RCRC_ERR_INT_EN 0x40
97 #define INT1_WCRC_ERR_INT_EN 0x80
99 /* SDMMC_STS0 bit fields */
100 #define STS0_WRITE_PROTECT 0x02
101 #define STS0_CD_DATA3 0x04
102 #define STS0_CD_GPI 0x08
103 #define STS0_MBLK_DONE 0x10
104 #define STS0_BLK_DONE 0x20
105 #define STS0_CARD_DETECT 0x40
106 #define STS0_DEVICE_INS 0x80
108 /* SDMMC_STS1 bit fields */
109 #define STS1_SDIO_INT 0x01
110 #define STS1_CMDRSP_DONE 0x02
111 #define STS1_RSP_TIMEOUT 0x04
112 #define STS1_AUTOSTOP_DONE 0x08
113 #define STS1_DATA_TIMEOUT 0x10
114 #define STS1_RSP_CRC_ERR 0x20
115 #define STS1_RCRC_ERR 0x40
116 #define STS1_WCRC_ERR 0x80
118 /* SDMMC_STS2 bit fields */
119 #define STS2_CMD_RES_BUSY 0x10
120 #define STS2_DATARSP_BUSY 0x20
121 #define STS2_DIS_FORCECLK 0x80
124 /* MMC/SD DMA Controller Registers */
125 #define SDDMA_GCR 0x100
126 #define SDDMA_IER 0x104
127 #define SDDMA_ISR 0x108
128 #define SDDMA_DESPR 0x10C
129 #define SDDMA_RBR 0x110
130 #define SDDMA_DAR 0x114
131 #define SDDMA_BAR 0x118
132 #define SDDMA_CPR 0x11C
133 #define SDDMA_CCR 0x120
136 /* SDDMA_GCR bit fields */
137 #define DMA_GCR_DMA_EN 0x00000001
138 #define DMA_GCR_SOFT_RESET 0x00000100
140 /* SDDMA_IER bit fields */
141 #define DMA_IER_INT_EN 0x00000001
143 /* SDDMA_ISR bit fields */
144 #define DMA_ISR_INT_STS 0x00000001
146 /* SDDMA_RBR bit fields */
147 #define DMA_RBR_FORMAT 0x40000000
148 #define DMA_RBR_END 0x80000000
150 /* SDDMA_CCR bit fields */
151 #define DMA_CCR_RUN 0x00000080
152 #define DMA_CCR_IF_TO_PERIPHERAL 0x00000000
153 #define DMA_CCR_PERIPHERAL_TO_IF 0x00400000
155 /* SDDMA_CCR event status */
156 #define DMA_CCR_EVT_NO_STATUS 0x00000000
157 #define DMA_CCR_EVT_UNDERRUN 0x00000001
158 #define DMA_CCR_EVT_OVERRUN 0x00000002
159 #define DMA_CCR_EVT_DESP_READ 0x00000003
160 #define DMA_CCR_EVT_DATA_RW 0x00000004
161 #define DMA_CCR_EVT_EARLY_END 0x00000005
162 #define DMA_CCR_EVT_SUCCESS 0x0000000F
164 #define PDMA_READ 0x00
165 #define PDMA_WRITE 0x01
167 #define WMT_SD_POWER_OFF 0
168 #define WMT_SD_POWER_ON 1
/* In-memory descriptor consumed by the SD/MMC PDMA engine (16 bytes). */
struct wmt_dma_descriptor {
	u32 flags;		/* format/end bits plus the request byte count */
	u32 data_buffer_addr;	/* bus address of the data buffer */
	u32 branch_addr;	/* bus address of the next descriptor in the chain */
	u32 reserved1;		/* pads the descriptor to 16 bytes */
};
/* Per-compatible controller capabilities, selected via the OF match data. */
struct wmt_mci_caps {
	unsigned int f_min;	/* minimum bus clock, Hz */
	unsigned int f_max;	/* maximum bus clock, Hz */
	u32 ocr_avail;		/* supported voltage ranges (MMC_VDD_* flags) */
	u32 caps;		/* MMC_CAP_* host capability flags */
	u32 max_seg_size;	/* largest DMA segment in bytes */
	u32 max_segs;		/* most scatterlist segments per request */
	u32 max_blk_size;	/* largest data block in bytes */
};
/* Per-host driver state, stored in the mmc_host private area. */
struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;	/* mapped controller register window */

	int irq_regular;		/* command/status interrupt line */
	int irq_dma;			/* PDMA completion interrupt line */

	void *dma_desc_buffer;		/* coherent buffer holding the descriptor chain */
	dma_addr_t dma_desc_device_addr; /* bus address of dma_desc_buffer */

	struct completion cmdcomp;	/* signalled when the command/response phase is done */
	struct completion datacomp;	/* signalled when the DMA data phase is done */

	/* non-NULL only while the corresponding phase is outstanding */
	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;	/* request currently in flight */
	struct mmc_command *cmd;	/* command whose response should be captured */

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;	/* set when the "sdon-inverted" DT property is present */
	u8 cd_inverted;		/* set when the "cd-inverted" DT property is present */
};
213 static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
215 u32 reg_tmp;
216 if (enable) {
217 if (priv->power_inverted) {
218 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
219 writeb(reg_tmp | BM_SD_OFF,
220 priv->sdmmc_base + SDMMC_BUSMODE);
221 } else {
222 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
223 writeb(reg_tmp & (~BM_SD_OFF),
224 priv->sdmmc_base + SDMMC_BUSMODE);
226 } else {
227 if (priv->power_inverted) {
228 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
229 writeb(reg_tmp & (~BM_SD_OFF),
230 priv->sdmmc_base + SDMMC_BUSMODE);
231 } else {
232 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
233 writeb(reg_tmp | BM_SD_OFF,
234 priv->sdmmc_base + SDMMC_BUSMODE);
/*
 * Assemble the four 32-bit response words for priv->cmd from the
 * controller's byte-wide response FIFO.
 */
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			/*
			 * The bytes are laid out at RSP+1..RSP+15 with the
			 * final byte wrapping around to offset RSP+0.
			 */
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1*4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		/*
		 * NOTE(review): cpu_to_be32 byte-swaps on little-endian hosts;
		 * combined with the LSB-first assembly above this yields the
		 * byte order the MMC core expects here - confirm against a
		 * big-endian build before changing.
		 */
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}
262 static void wmt_mci_start_command(struct wmt_mci_priv *priv)
264 u32 reg_tmp;
266 reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
267 writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
/*
 * Latch a command (opcode, argument, expected response type) into the
 * controller and prepare it for transmission.  The transfer itself is
 * started separately by wmt_mci_start_command().  Always returns 0.
 */
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type in the high nibble, preserving the control bits */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}
/* Ack any pending DMA interrupt, then mask all further DMA interrupts. */
static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}
/*
 * Finish a data request once both the DMA phase and the command/response
 * phase have completed: record bytes transferred, unmap the scatterlist,
 * then either complete the request or - for multi-block transfers -
 * issue the stop command (whose completion is handled by the regular ISR).
 */
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			/* cmdtype 7 / rsptype 9 (R1b) for the stop command */
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}
/*
 * DMA-completion interrupt handler.  On success, acks and masks the DMA
 * engine and - if the command phase already completed - finishes the
 * request; otherwise the regular ISR will finish it.  On failure, flags
 * the data error and lets the pending completion unwind the request.
 */
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;

	int status;

	priv = (struct wmt_mci_priv *)data;

	/* the low nibble of CCR holds the DMA event status */
	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		/* every DMA failure is reported to the core as a timeout */
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * if the command (regular) interrupt has already
			 * completed, finish off the request otherwise we wait
			 * for the command interrupt and finish from there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}
/*
 * Main controller interrupt handler.  Handles three cases:
 *  - card insertion/removal: notify the core and fail any in-flight
 *    command/DMA so the request unwinds;
 *  - non-data commands (and the stop command of a multi-block transfer):
 *    read the response and complete the request directly;
 *  - data commands: signal the command-phase completion and, if the DMA
 *    phase already finished, complete the whole request.
 */
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		/* abort anything in flight - the card just changed */
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		/* timeouts also unwind a pending DMA phase */
		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	/* ack exactly the status bits we sampled above */
	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}
/*
 * Put the controller into a known 'ready' state: soft reset, GPI-based
 * card detect, default interrupt masks, DMA timeout and a 400kHz
 * identification-phase clock.
 */
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400Khz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}
517 static int wmt_dma_init(struct mmc_host *mmc)
519 struct wmt_mci_priv *priv;
521 priv = mmc_priv(mmc);
523 writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
524 writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
525 if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
526 return 0;
527 else
528 return 1;
531 static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
532 u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
534 desc->flags = 0x40000000 | req_count;
535 if (end)
536 desc->flags |= 0x80000000;
537 desc->data_buffer_addr = buffer_addr;
538 desc->branch_addr = branch_addr;
/*
 * Program the PDMA engine with the descriptor-chain address and transfer
 * direction.  Does not start the transfer (see wmt_dma_start()).
 */
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		/*
		 * NOTE(review): DMA_CCR_IF_TO_PERIPHERAL is 0, so this AND
		 * clears the entire CCR rather than just a direction bit.
		 * It works only because CCR was zeroed just above - verify
		 * against the datasheet before relying on it.
		 */
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}
567 static void wmt_dma_start(struct wmt_mci_priv *priv)
569 u32 reg_tmp;
571 reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
572 writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
/*
 * mmc_host_ops .request handler.  Non-data commands are latched and
 * started immediately (completion is handled by wmt_mci_regular_isr()).
 * Data commands additionally build a PDMA descriptor chain - one
 * descriptor per block - map the scatterlist, and start the DMA engine
 * alongside the command.
 */
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		/* cmdtype: 1/3 = single/multi-block write, 2/4 = read */
		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		/* descriptors are 16 bytes; each links to its successor */
		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		/* one descriptor per block, walking the mapped scatterlist */
		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		/* mark the last descriptor written as end-of-chain */
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}
/*
 * mmc_host_ops .set_ios handler: power sequencing, bus clock rate and
 * bus-width selection.
 */
static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		/* re-initialize the controller before powering the card */
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		/* 8-bit mode is selected via bit 2 of the extended control */
		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_4:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		/* clear the 8-bit mode bit (0xFB masks out bit 2) */
		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_1:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	}
}
728 static int wmt_mci_get_ro(struct mmc_host *mmc)
730 struct wmt_mci_priv *priv = mmc_priv(mmc);
732 return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
735 static int wmt_mci_get_cd(struct mmc_host *mmc)
737 struct wmt_mci_priv *priv = mmc_priv(mmc);
738 u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;
740 return !(cd ^ priv->cd_inverted);
743 static struct mmc_host_ops wmt_mci_ops = {
744 .request = wmt_mci_request,
745 .set_ios = wmt_mci_set_ios,
746 .get_ro = wmt_mci_get_ro,
747 .get_cd = wmt_mci_get_cd,
750 /* Controller capabilities */
751 static struct wmt_mci_caps wm8505_caps = {
752 .f_min = 390425,
753 .f_max = 50000000,
754 .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
755 .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
756 MMC_CAP_SD_HIGHSPEED,
757 .max_seg_size = 65024,
758 .max_segs = 128,
759 .max_blk_size = 2048,
762 static struct of_device_id wmt_mci_dt_ids[] = {
763 { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
764 { /* Sentinel */ },
767 static int wmt_mci_probe(struct platform_device *pdev)
769 struct mmc_host *mmc;
770 struct wmt_mci_priv *priv;
771 struct device_node *np = pdev->dev.of_node;
772 const struct of_device_id *of_id =
773 of_match_device(wmt_mci_dt_ids, &pdev->dev);
774 const struct wmt_mci_caps *wmt_caps = of_id->data;
775 int ret;
776 int regular_irq, dma_irq;
778 if (!of_id || !of_id->data) {
779 dev_err(&pdev->dev, "Controller capabilities data missing\n");
780 return -EFAULT;
783 if (!np) {
784 dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
785 return -EFAULT;
788 regular_irq = irq_of_parse_and_map(np, 0);
789 dma_irq = irq_of_parse_and_map(np, 1);
791 if (!regular_irq || !dma_irq) {
792 dev_err(&pdev->dev, "Getting IRQs failed!\n");
793 ret = -ENXIO;
794 goto fail1;
797 mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
798 if (!mmc) {
799 dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
800 ret = -ENOMEM;
801 goto fail1;
804 mmc->ops = &wmt_mci_ops;
805 mmc->f_min = wmt_caps->f_min;
806 mmc->f_max = wmt_caps->f_max;
807 mmc->ocr_avail = wmt_caps->ocr_avail;
808 mmc->caps = wmt_caps->caps;
810 mmc->max_seg_size = wmt_caps->max_seg_size;
811 mmc->max_segs = wmt_caps->max_segs;
812 mmc->max_blk_size = wmt_caps->max_blk_size;
814 mmc->max_req_size = (16*512*mmc->max_segs);
815 mmc->max_blk_count = mmc->max_req_size / 512;
817 priv = mmc_priv(mmc);
818 priv->mmc = mmc;
819 priv->dev = &pdev->dev;
821 priv->power_inverted = 0;
822 priv->cd_inverted = 0;
824 if (of_get_property(np, "sdon-inverted", NULL))
825 priv->power_inverted = 1;
826 if (of_get_property(np, "cd-inverted", NULL))
827 priv->cd_inverted = 1;
829 priv->sdmmc_base = of_iomap(np, 0);
830 if (!priv->sdmmc_base) {
831 dev_err(&pdev->dev, "Failed to map IO space\n");
832 ret = -ENOMEM;
833 goto fail2;
836 priv->irq_regular = regular_irq;
837 priv->irq_dma = dma_irq;
839 ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
840 if (ret) {
841 dev_err(&pdev->dev, "Register regular IRQ fail\n");
842 goto fail3;
845 ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv);
846 if (ret) {
847 dev_err(&pdev->dev, "Register DMA IRQ fail\n");
848 goto fail4;
851 /* alloc some DMA buffers for descriptors/transfers */
852 priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
853 mmc->max_blk_count * 16,
854 &priv->dma_desc_device_addr,
855 208);
856 if (!priv->dma_desc_buffer) {
857 dev_err(&pdev->dev, "DMA alloc fail\n");
858 ret = -EPERM;
859 goto fail5;
862 platform_set_drvdata(pdev, mmc);
864 priv->clk_sdmmc = of_clk_get(np, 0);
865 if (IS_ERR(priv->clk_sdmmc)) {
866 dev_err(&pdev->dev, "Error getting clock\n");
867 ret = PTR_ERR(priv->clk_sdmmc);
868 goto fail5;
871 clk_prepare_enable(priv->clk_sdmmc);
873 /* configure the controller to a known 'ready' state */
874 wmt_reset_hardware(mmc);
876 mmc_add_host(mmc);
878 dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
880 return 0;
881 fail5:
882 free_irq(dma_irq, priv);
883 fail4:
884 free_irq(regular_irq, priv);
885 fail3:
886 iounmap(priv->sdmmc_base);
887 fail2:
888 mmc_free_host(mmc);
889 fail1:
890 return ret;
/*
 * Teardown: quiesce the controller, then release the DMA buffer, IRQs,
 * register mapping, clock and host in the reverse order of probe.
 */
static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	/*
	 * NOTE(review): writel on the byte-wide BUSMODE register - every
	 * other access to BUSMODE in this driver uses writeb; confirm the
	 * wide write is intentional before changing.
	 */
	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	/* 0xA000 masks out BLKL_GPI_CD and BLKL_INT_ENABLE */
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}
935 #ifdef CONFIG_PM
/*
 * System suspend: let the MMC core suspend the host, then soft-reset the
 * controller, disable card-detect, clear stale status and gate the clock.
 */
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;
	int ret;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	ret = mmc_suspend_host(mmc);

	if (!ret) {
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		/* 0x5FFF clears BLKL_INT_ENABLE and BLKL_GPI_CD */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

		writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
		writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

		clk_disable(priv->clk_sdmmc);
	}
	return ret;
}
/*
 * System resume: ungate the clock, soft-reset the controller and
 * re-enable card-detect interrupts before letting the core resume the
 * host.
 */
static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;
	int ret = 0;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		/* restore GPI card-detect and interrupt enable */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
/* System sleep callbacks; compiled only when CONFIG_PM is set. */
static const struct dev_pm_ops wmt_mci_pm = {
	.suspend = wmt_mci_suspend,
	.resume = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)
1003 #else /* !CONFIG_PM */
1005 #define wmt_mci_pm_ops NULL
1007 #endif
/* Platform driver glue; devices are bound via the OF match table. */
static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);
1022 MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
1023 MODULE_AUTHOR("Tony Prisk");
1024 MODULE_LICENSE("GPL v2");
1025 MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);