Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / mmc / host / alcor.c
blobbfb8efeb7eb805588eb2f2ad1746e8c9901b3b3d
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
5 * Driver for Alcor Micro AU6601 and AU6621 controllers
6 */
 8 /* Note: this driver was created without any documentation. It is based
 9  * on sniffing, testing and in some cases mimicry of the original driver.
10  * If someone with documentation, or with more experience in SD/MMC or
11  * reverse engineering than me, is available, please review this driver
12  * and question everything I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
13  */
15 #include <linux/delay.h>
16 #include <linux/pci.h>
17 #include <linux/module.h>
18 #include <linux/io.h>
19 #include <linux/pm.h>
20 #include <linux/irq.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/mmc.h>
27 #include <linux/alcor_pci.h>
29 enum alcor_cookie {
30 COOKIE_UNMAPPED,
31 COOKIE_PRE_MAPPED,
32 COOKIE_MAPPED,
35 struct alcor_pll_conf {
36 unsigned int clk_src_freq;
37 unsigned int clk_src_reg;
38 unsigned int min_div;
39 unsigned int max_div;
42 struct alcor_sdmmc_host {
43 struct device *dev;
44 struct alcor_pci_priv *alcor_pci;
46 struct mmc_request *mrq;
47 struct mmc_command *cmd;
48 struct mmc_data *data;
49 unsigned int dma_on:1;
51 struct mutex cmd_mutex;
53 struct delayed_work timeout_work;
55 struct sg_mapping_iter sg_miter; /* SG state for PIO */
56 struct scatterlist *sg;
57 unsigned int blocks; /* remaining PIO blocks */
58 int sg_count;
60 u32 irq_status_sd;
61 unsigned char cur_power_mode;
64 static const struct alcor_pll_conf alcor_pll_cfg[] = {
65 /* MHZ, CLK src, max div, min div */
66 { 31250000, AU6601_CLK_31_25_MHZ, 1, 511},
67 { 48000000, AU6601_CLK_48_MHZ, 1, 511},
68 {125000000, AU6601_CLK_125_MHZ, 1, 511},
69 {384000000, AU6601_CLK_384_MHZ, 1, 511},
72 static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
73 u8 clear, u8 set)
75 struct alcor_pci_priv *priv = host->alcor_pci;
76 u32 var;
78 var = alcor_read8(priv, addr);
79 var &= ~clear;
80 var |= set;
81 alcor_write8(priv, var, addr);
84 /* As soon as irqs are masked, some status updates may be missed.
85 * Use this with care.
87 static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
89 struct alcor_pci_priv *priv = host->alcor_pci;
91 alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
94 static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
96 struct alcor_pci_priv *priv = host->alcor_pci;
98 alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
99 AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
100 AU6601_INT_OVER_CURRENT_ERR,
101 AU6601_REG_INT_ENABLE);
104 static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
106 struct alcor_pci_priv *priv = host->alcor_pci;
107 int i;
109 alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
110 AU6601_REG_SW_RESET);
111 for (i = 0; i < 100; i++) {
112 if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
113 return;
114 udelay(50);
116 dev_err(host->dev, "%s: timeout\n", __func__);
120 * Perform DMA I/O of a single page.
122 static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
124 struct alcor_pci_priv *priv = host->alcor_pci;
125 u32 addr;
127 if (!host->sg_count)
128 return;
130 if (!host->sg) {
131 dev_err(host->dev, "have blocks, but no SG\n");
132 return;
135 if (!sg_dma_len(host->sg)) {
136 dev_err(host->dev, "DMA SG len == 0\n");
137 return;
141 addr = (u32)sg_dma_address(host->sg);
143 alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
144 host->sg = sg_next(host->sg);
145 host->sg_count--;
148 static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
150 struct alcor_pci_priv *priv = host->alcor_pci;
151 struct mmc_data *data = host->data;
152 u8 ctrl = 0;
154 if (data->flags & MMC_DATA_WRITE)
155 ctrl |= AU6601_DATA_WRITE;
157 if (data->host_cookie == COOKIE_MAPPED) {
159 * For DMA transfers, this function is called just once,
160 * at the start of the operation. The hardware can only
161 * perform DMA I/O on a single page at a time, so here
162 * we kick off the transfer with the first page, and expect
163 * subsequent pages to be transferred upon IRQ events
164 * indicating that the single-page DMA was completed.
166 alcor_data_set_dma(host);
167 ctrl |= AU6601_DATA_DMA_MODE;
168 host->dma_on = 1;
169 alcor_write32(priv, data->sg_count * 0x1000,
170 AU6601_REG_BLOCK_SIZE);
171 } else {
173 * For PIO transfers, we break down each operation
174 * into several sector-sized transfers. When one sector has
175 * complete, the IRQ handler will call this function again
176 * to kick off the transfer of the next sector.
178 alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
181 alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
182 AU6601_DATA_XFER_CTRL);
185 static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
187 struct alcor_pci_priv *priv = host->alcor_pci;
188 size_t blksize, len;
189 u8 *buf;
191 if (!host->blocks)
192 return;
194 if (host->dma_on) {
195 dev_err(host->dev, "configured DMA but got PIO request.\n");
196 return;
199 if (!!(host->data->flags & MMC_DATA_READ) != read) {
200 dev_err(host->dev, "got unexpected direction %i != %i\n",
201 !!(host->data->flags & MMC_DATA_READ), read);
204 if (!sg_miter_next(&host->sg_miter))
205 return;
207 blksize = host->data->blksz;
208 len = min(host->sg_miter.length, blksize);
210 dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
211 read ? "read" : "write", blksize);
213 host->sg_miter.consumed = len;
214 host->blocks--;
216 buf = host->sg_miter.addr;
218 if (read)
219 ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
220 else
221 iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
223 sg_miter_stop(&host->sg_miter);
226 static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
228 unsigned int flags = SG_MITER_ATOMIC;
229 struct mmc_data *data = host->data;
231 if (data->flags & MMC_DATA_READ)
232 flags |= SG_MITER_TO_SG;
233 else
234 flags |= SG_MITER_FROM_SG;
235 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
238 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
239 struct mmc_command *cmd)
241 struct alcor_pci_priv *priv = host->alcor_pci;
242 struct mmc_data *data = cmd->data;
244 if (!data)
245 return;
248 host->data = data;
249 host->data->bytes_xfered = 0;
250 host->blocks = data->blocks;
251 host->sg = data->sg;
252 host->sg_count = data->sg_count;
253 dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
254 host->sg_count, host->blocks);
256 if (data->host_cookie != COOKIE_MAPPED)
257 alcor_prepare_sg_miter(host);
259 alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
262 static void alcor_send_cmd(struct alcor_sdmmc_host *host,
263 struct mmc_command *cmd, bool set_timeout)
265 struct alcor_pci_priv *priv = host->alcor_pci;
266 unsigned long timeout = 0;
267 u8 ctrl = 0;
269 host->cmd = cmd;
270 alcor_prepare_data(host, cmd);
272 dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
273 cmd->opcode, cmd->arg);
274 alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
275 alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);
277 switch (mmc_resp_type(cmd)) {
278 case MMC_RSP_NONE:
279 ctrl = AU6601_CMD_NO_RESP;
280 break;
281 case MMC_RSP_R1:
282 ctrl = AU6601_CMD_6_BYTE_CRC;
283 break;
284 case MMC_RSP_R1B:
285 ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
286 break;
287 case MMC_RSP_R2:
288 ctrl = AU6601_CMD_17_BYTE_CRC;
289 break;
290 case MMC_RSP_R3:
291 ctrl = AU6601_CMD_6_BYTE_WO_CRC;
292 break;
293 default:
294 dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
295 mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
296 break;
299 if (set_timeout) {
300 if (!cmd->data && cmd->busy_timeout)
301 timeout = cmd->busy_timeout;
302 else
303 timeout = 10000;
305 schedule_delayed_work(&host->timeout_work,
306 msecs_to_jiffies(timeout));
309 dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
310 alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
311 AU6601_CMD_XFER_CTRL);
314 static void alcor_request_complete(struct alcor_sdmmc_host *host,
315 bool cancel_timeout)
317 struct mmc_request *mrq;
320 * If this work gets rescheduled while running, it will
321 * be run again afterwards but without any active request.
323 if (!host->mrq)
324 return;
326 if (cancel_timeout)
327 cancel_delayed_work(&host->timeout_work);
329 mrq = host->mrq;
331 host->mrq = NULL;
332 host->cmd = NULL;
333 host->data = NULL;
334 host->dma_on = 0;
336 mmc_request_done(mmc_from_priv(host), mrq);
339 static void alcor_finish_data(struct alcor_sdmmc_host *host)
341 struct mmc_data *data;
343 data = host->data;
344 host->data = NULL;
345 host->dma_on = 0;
348 * The specification states that the block count register must
349 * be updated, but it does not specify at what point in the
350 * data flow. That makes the register entirely useless to read
351 * back so we have to assume that nothing made it to the card
352 * in the event of an error.
354 if (data->error)
355 data->bytes_xfered = 0;
356 else
357 data->bytes_xfered = data->blksz * data->blocks;
360 * Need to send CMD12 if -
361 * a) open-ended multiblock transfer (no CMD23)
362 * b) error in multiblock transfer
364 if (data->stop &&
365 (data->error ||
366 !host->mrq->sbc)) {
369 * The controller needs a reset of internal state machines
370 * upon error conditions.
372 if (data->error)
373 alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
375 alcor_unmask_sd_irqs(host);
376 alcor_send_cmd(host, data->stop, false);
377 return;
380 alcor_request_complete(host, 1);
383 static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
385 dev_dbg(host->dev, "ERR IRQ %x\n", intmask);
387 if (host->cmd) {
388 if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
389 host->cmd->error = -ETIMEDOUT;
390 else
391 host->cmd->error = -EILSEQ;
394 if (host->data) {
395 if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
396 host->data->error = -ETIMEDOUT;
397 else
398 host->data->error = -EILSEQ;
400 host->data->bytes_xfered = 0;
403 alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
404 alcor_request_complete(host, 1);
407 static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
409 struct alcor_pci_priv *priv = host->alcor_pci;
411 intmask &= AU6601_INT_CMD_END;
413 if (!intmask)
414 return true;
416 /* got CMD_END but no CMD is in progress, wake thread an process the
417 * error
419 if (!host->cmd)
420 return false;
422 if (host->cmd->flags & MMC_RSP_PRESENT) {
423 struct mmc_command *cmd = host->cmd;
425 cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
426 dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
427 if (host->cmd->flags & MMC_RSP_136) {
428 cmd->resp[1] =
429 alcor_read32be(priv, AU6601_REG_CMD_RSP1);
430 cmd->resp[2] =
431 alcor_read32be(priv, AU6601_REG_CMD_RSP2);
432 cmd->resp[3] =
433 alcor_read32be(priv, AU6601_REG_CMD_RSP3);
434 dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
435 cmd->resp[1], cmd->resp[2], cmd->resp[3]);
440 host->cmd->error = 0;
442 /* Processed actual command. */
443 if (!host->data)
444 return false;
446 alcor_trigger_data_transfer(host);
447 host->cmd = NULL;
448 return true;
451 static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
453 intmask &= AU6601_INT_CMD_END;
455 if (!intmask)
456 return;
458 if (!host->cmd && intmask & AU6601_INT_CMD_END) {
459 dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
460 intmask);
463 /* Processed actual command. */
464 if (!host->data)
465 alcor_request_complete(host, 1);
466 else
467 alcor_trigger_data_transfer(host);
468 host->cmd = NULL;
471 static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
473 u32 tmp;
475 intmask &= AU6601_INT_DATA_MASK;
477 /* nothing here to do */
478 if (!intmask)
479 return 1;
481 /* we was too fast and got DATA_END after it was processed?
482 * lets ignore it for now.
484 if (!host->data && intmask == AU6601_INT_DATA_END)
485 return 1;
487 /* looks like an error, so lets handle it. */
488 if (!host->data)
489 return 0;
491 tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
492 | AU6601_INT_DMA_END);
493 switch (tmp) {
494 case 0:
495 break;
496 case AU6601_INT_READ_BUF_RDY:
497 alcor_trf_block_pio(host, true);
498 return 1;
499 case AU6601_INT_WRITE_BUF_RDY:
500 alcor_trf_block_pio(host, false);
501 return 1;
502 case AU6601_INT_DMA_END:
503 if (!host->sg_count)
504 break;
506 alcor_data_set_dma(host);
507 break;
508 default:
509 dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
510 break;
513 if (intmask & AU6601_INT_DATA_END) {
514 if (!host->dma_on && host->blocks) {
515 alcor_trigger_data_transfer(host);
516 return 1;
517 } else {
518 return 0;
522 return 1;
525 static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
527 intmask &= AU6601_INT_DATA_MASK;
529 if (!intmask)
530 return;
532 if (!host->data) {
533 dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
534 intmask);
535 alcor_reset(host, AU6601_RESET_DATA);
536 return;
539 if (alcor_data_irq_done(host, intmask))
540 return;
542 if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
543 (host->dma_on && !host->sg_count))
544 alcor_finish_data(host);
547 static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
549 dev_dbg(host->dev, "card %s\n",
550 intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");
552 if (host->mrq) {
553 dev_dbg(host->dev, "cancel all pending tasks.\n");
555 if (host->data)
556 host->data->error = -ENOMEDIUM;
558 if (host->cmd)
559 host->cmd->error = -ENOMEDIUM;
560 else
561 host->mrq->cmd->error = -ENOMEDIUM;
563 alcor_request_complete(host, 1);
566 mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
569 static irqreturn_t alcor_irq_thread(int irq, void *d)
571 struct alcor_sdmmc_host *host = d;
572 irqreturn_t ret = IRQ_HANDLED;
573 u32 intmask, tmp;
575 mutex_lock(&host->cmd_mutex);
577 intmask = host->irq_status_sd;
579 /* some thing bad */
580 if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
581 dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
582 ret = IRQ_NONE;
583 goto exit;
586 tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
587 if (tmp) {
588 if (tmp & AU6601_INT_ERROR_MASK)
589 alcor_err_irq(host, tmp);
590 else {
591 alcor_cmd_irq_thread(host, tmp);
592 alcor_data_irq_thread(host, tmp);
594 intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
597 if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
598 alcor_cd_irq(host, intmask);
599 intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
602 if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
603 dev_warn(host->dev,
604 "warning: over current detected!\n");
605 intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
608 if (intmask)
609 dev_dbg(host->dev, "got not handled IRQ: 0x%04x\n", intmask);
611 exit:
612 mutex_unlock(&host->cmd_mutex);
613 alcor_unmask_sd_irqs(host);
614 return ret;
618 static irqreturn_t alcor_irq(int irq, void *d)
620 struct alcor_sdmmc_host *host = d;
621 struct alcor_pci_priv *priv = host->alcor_pci;
622 u32 status, tmp;
623 irqreturn_t ret;
624 int cmd_done, data_done;
626 status = alcor_read32(priv, AU6601_REG_INT_STATUS);
627 if (!status)
628 return IRQ_NONE;
630 alcor_write32(priv, status, AU6601_REG_INT_STATUS);
632 tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
633 | AU6601_INT_DATA_END | AU6601_INT_DMA_END
634 | AU6601_INT_CMD_END);
635 if (tmp == status) {
636 cmd_done = alcor_cmd_irq_done(host, tmp);
637 data_done = alcor_data_irq_done(host, tmp);
638 /* use fast path for simple tasks */
639 if (cmd_done && data_done) {
640 ret = IRQ_HANDLED;
641 goto alcor_irq_done;
645 host->irq_status_sd = status;
646 ret = IRQ_WAKE_THREAD;
647 alcor_mask_sd_irqs(host);
648 alcor_irq_done:
649 return ret;
652 static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
654 struct alcor_pci_priv *priv = host->alcor_pci;
655 int i, diff = 0x7fffffff, tmp_clock = 0;
656 u16 clk_src = 0;
657 u8 clk_div = 0;
659 if (clock == 0) {
660 alcor_write16(priv, 0, AU6601_CLK_SELECT);
661 return;
664 for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
665 unsigned int tmp_div, tmp_diff;
666 const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];
668 tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
669 if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
670 continue;
672 tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
673 tmp_diff = abs(clock - tmp_clock);
675 if (tmp_diff < diff) {
676 diff = tmp_diff;
677 clk_src = cfg->clk_src_reg;
678 clk_div = tmp_div;
682 clk_src |= ((clk_div - 1) << 8);
683 clk_src |= AU6601_CLK_ENABLE;
685 dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
686 clock, tmp_clock, clk_div, clk_src);
688 alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
692 static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
694 struct alcor_sdmmc_host *host = mmc_priv(mmc);
696 if (ios->timing == MMC_TIMING_LEGACY) {
697 alcor_rmw8(host, AU6601_CLK_DELAY,
698 AU6601_CLK_POSITIVE_EDGE_ALL, 0);
699 } else {
700 alcor_rmw8(host, AU6601_CLK_DELAY,
701 0, AU6601_CLK_POSITIVE_EDGE_ALL);
705 static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
707 struct alcor_sdmmc_host *host = mmc_priv(mmc);
708 struct alcor_pci_priv *priv = host->alcor_pci;
710 if (ios->bus_width == MMC_BUS_WIDTH_1) {
711 alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
712 } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
713 alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
714 AU6601_REG_BUS_CTRL);
715 } else
716 dev_err(host->dev, "Unknown BUS mode\n");
720 static int alcor_card_busy(struct mmc_host *mmc)
722 struct alcor_sdmmc_host *host = mmc_priv(mmc);
723 struct alcor_pci_priv *priv = host->alcor_pci;
724 u8 status;
726 /* Check whether dat[0:3] low */
727 status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
729 return !(status & AU6601_BUS_STAT_DAT_MASK);
732 static int alcor_get_cd(struct mmc_host *mmc)
734 struct alcor_sdmmc_host *host = mmc_priv(mmc);
735 struct alcor_pci_priv *priv = host->alcor_pci;
736 u8 detect;
738 detect = alcor_read8(priv, AU6601_DETECT_STATUS)
739 & AU6601_DETECT_STATUS_M;
740 /* check if card is present then send command and data */
741 return (detect == AU6601_SD_DETECTED);
744 static int alcor_get_ro(struct mmc_host *mmc)
746 struct alcor_sdmmc_host *host = mmc_priv(mmc);
747 struct alcor_pci_priv *priv = host->alcor_pci;
748 u8 status;
750 /* get write protect pin status */
751 status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
753 return !!(status & AU6601_SD_CARD_WP);
756 static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
758 struct alcor_sdmmc_host *host = mmc_priv(mmc);
760 mutex_lock(&host->cmd_mutex);
762 host->mrq = mrq;
764 /* check if card is present then send command and data */
765 if (alcor_get_cd(mmc))
766 alcor_send_cmd(host, mrq->cmd, true);
767 else {
768 mrq->cmd->error = -ENOMEDIUM;
769 alcor_request_complete(host, 1);
772 mutex_unlock(&host->cmd_mutex);
775 static void alcor_pre_req(struct mmc_host *mmc,
776 struct mmc_request *mrq)
778 struct alcor_sdmmc_host *host = mmc_priv(mmc);
779 struct mmc_data *data = mrq->data;
780 struct mmc_command *cmd = mrq->cmd;
781 struct scatterlist *sg;
782 unsigned int i, sg_len;
784 if (!data || !cmd)
785 return;
787 data->host_cookie = COOKIE_UNMAPPED;
789 /* FIXME: looks like the DMA engine works only with CMD18 */
790 if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
791 && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
792 return;
794 * We don't do DMA on "complex" transfers, i.e. with
795 * non-word-aligned buffers or lengths. A future improvement
796 * could be made to use temporary DMA bounce-buffers when these
797 * requirements are not met.
799 * Also, we don't bother with all the DMA setup overhead for
800 * short transfers.
802 if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
803 return;
805 if (data->blksz & 3)
806 return;
808 for_each_sg(data->sg, sg, data->sg_len, i) {
809 if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
810 return;
811 if (sg->offset != 0)
812 return;
815 /* This data might be unmapped at this time */
817 sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
818 mmc_get_dma_dir(data));
819 if (sg_len)
820 data->host_cookie = COOKIE_MAPPED;
822 data->sg_count = sg_len;
825 static void alcor_post_req(struct mmc_host *mmc,
826 struct mmc_request *mrq,
827 int err)
829 struct alcor_sdmmc_host *host = mmc_priv(mmc);
830 struct mmc_data *data = mrq->data;
832 if (!data)
833 return;
835 if (data->host_cookie == COOKIE_MAPPED) {
836 dma_unmap_sg(host->dev,
837 data->sg,
838 data->sg_len,
839 mmc_get_dma_dir(data));
842 data->host_cookie = COOKIE_UNMAPPED;
845 static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
847 struct alcor_sdmmc_host *host = mmc_priv(mmc);
848 struct alcor_pci_priv *priv = host->alcor_pci;
850 switch (ios->power_mode) {
851 case MMC_POWER_OFF:
852 alcor_set_clock(host, ios->clock);
853 /* set all pins to input */
854 alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
855 /* turn of VDD */
856 alcor_write8(priv, 0, AU6601_POWER_CONTROL);
857 break;
858 case MMC_POWER_UP:
859 break;
860 case MMC_POWER_ON:
861 /* This is most trickiest part. The order and timings of
862 * instructions seems to play important role. Any changes may
863 * confuse internal state engine if this HW.
864 * FIXME: If we will ever get access to documentation, then this
865 * part should be reviewed again.
868 /* enable SD card mode */
869 alcor_write8(priv, AU6601_SD_CARD,
870 AU6601_ACTIVE_CTRL);
871 /* set signal voltage to 3.3V */
872 alcor_write8(priv, 0, AU6601_OPT);
873 /* no documentation about clk delay, for now just try to mimic
874 * original driver.
876 alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
877 /* set BUS width to 1 bit */
878 alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
879 /* set CLK first time */
880 alcor_set_clock(host, ios->clock);
881 /* power on VDD */
882 alcor_write8(priv, AU6601_SD_CARD,
883 AU6601_POWER_CONTROL);
884 /* wait until the CLK will get stable */
885 mdelay(20);
886 /* set CLK again, mimic original driver. */
887 alcor_set_clock(host, ios->clock);
889 /* enable output */
890 alcor_write8(priv, AU6601_SD_CARD,
891 AU6601_OUTPUT_ENABLE);
892 /* The clk will not work on au6621. We need to trigger data
893 * transfer.
895 alcor_write8(priv, AU6601_DATA_WRITE,
896 AU6601_DATA_XFER_CTRL);
897 /* configure timeout. Not clear what exactly it means. */
898 alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
899 mdelay(100);
900 break;
901 default:
902 dev_err(host->dev, "Unknown power parameter\n");
906 static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
908 struct alcor_sdmmc_host *host = mmc_priv(mmc);
910 mutex_lock(&host->cmd_mutex);
912 dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
913 ios->bus_width, ios->power_mode);
915 if (ios->power_mode != host->cur_power_mode) {
916 alcor_set_power_mode(mmc, ios);
917 host->cur_power_mode = ios->power_mode;
918 } else {
919 alcor_set_timing(mmc, ios);
920 alcor_set_bus_width(mmc, ios);
921 alcor_set_clock(host, ios->clock);
924 mutex_unlock(&host->cmd_mutex);
927 static int alcor_signal_voltage_switch(struct mmc_host *mmc,
928 struct mmc_ios *ios)
930 struct alcor_sdmmc_host *host = mmc_priv(mmc);
932 mutex_lock(&host->cmd_mutex);
934 switch (ios->signal_voltage) {
935 case MMC_SIGNAL_VOLTAGE_330:
936 alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
937 break;
938 case MMC_SIGNAL_VOLTAGE_180:
939 alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
940 break;
941 default:
942 /* No signal voltage switch required */
943 break;
946 mutex_unlock(&host->cmd_mutex);
947 return 0;
950 static const struct mmc_host_ops alcor_sdc_ops = {
951 .card_busy = alcor_card_busy,
952 .get_cd = alcor_get_cd,
953 .get_ro = alcor_get_ro,
954 .post_req = alcor_post_req,
955 .pre_req = alcor_pre_req,
956 .request = alcor_request,
957 .set_ios = alcor_set_ios,
958 .start_signal_voltage_switch = alcor_signal_voltage_switch,
961 static void alcor_timeout_timer(struct work_struct *work)
963 struct delayed_work *d = to_delayed_work(work);
964 struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
965 timeout_work);
966 mutex_lock(&host->cmd_mutex);
968 dev_dbg(host->dev, "triggered timeout\n");
969 if (host->mrq) {
970 dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
972 if (host->data) {
973 host->data->error = -ETIMEDOUT;
974 } else {
975 if (host->cmd)
976 host->cmd->error = -ETIMEDOUT;
977 else
978 host->mrq->cmd->error = -ETIMEDOUT;
981 alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
982 alcor_request_complete(host, 0);
985 mutex_unlock(&host->cmd_mutex);
988 static void alcor_hw_init(struct alcor_sdmmc_host *host)
990 struct alcor_pci_priv *priv = host->alcor_pci;
991 struct alcor_dev_cfg *cfg = priv->cfg;
993 /* FIXME: This part is a mimics HW init of original driver.
994 * If we will ever get access to documentation, then this part
995 * should be reviewed again.
998 /* reset command state engine */
999 alcor_reset(host, AU6601_RESET_CMD);
1001 alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1002 /* enable sd card mode */
1003 alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);
1005 /* set BUS width to 1 bit */
1006 alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
1008 /* reset data state engine */
1009 alcor_reset(host, AU6601_RESET_DATA);
1010 /* Not sure if a voodoo with AU6601_DMA_BOUNDARY is really needed */
1011 alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1013 alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
1014 /* not clear what we are doing here. */
1015 alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
1016 alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
1017 alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);
1019 /* for 6601 - dma_boundary; for 6621 - dma_page_cnt
1020 * exact meaning of this register is not clear.
1022 alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);
1024 /* make sure all pins are set to input and VDD is off */
1025 alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1026 alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1028 alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
1029 /* now we should be safe to enable IRQs */
1030 alcor_unmask_sd_irqs(host);
1033 static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
1035 struct alcor_pci_priv *priv = host->alcor_pci;
1037 alcor_mask_sd_irqs(host);
1038 alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
1040 alcor_write8(priv, 0, AU6601_DETECT_STATUS);
1042 alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1043 alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1045 alcor_write8(priv, 0, AU6601_OPT);
1048 static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1050 struct mmc_host *mmc = mmc_from_priv(host);
1052 mmc->f_min = AU6601_MIN_CLOCK;
1053 mmc->f_max = AU6601_MAX_CLOCK;
1054 mmc->ocr_avail = MMC_VDD_33_34;
1055 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
1056 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
1057 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
1058 mmc->caps2 = MMC_CAP2_NO_SDIO;
1059 mmc->ops = &alcor_sdc_ops;
1061 /* The hardware does DMA data transfer of 4096 bytes to/from a single
1062 * buffer address. Scatterlists are not supported at the hardware
1063 * level, however we can work with them at the driver level,
1064 * provided that each segment is exactly 4096 bytes in size.
1065 * Upon DMA completion of a single segment (signalled via IRQ), we
1066 * immediately proceed to transfer the next segment from the
1067 * scatterlist.
1069 * The overall request is limited to 240 sectors, matching the
1070 * original vendor driver.
1072 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1073 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1074 mmc->max_blk_count = 240;
1075 mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
1076 dma_set_max_seg_size(host->dev, mmc->max_seg_size);
1079 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1081 struct alcor_pci_priv *priv = pdev->dev.platform_data;
1082 struct mmc_host *mmc;
1083 struct alcor_sdmmc_host *host;
1084 int ret;
1086 mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1087 if (!mmc) {
1088 dev_err(&pdev->dev, "Can't allocate MMC\n");
1089 return -ENOMEM;
1092 host = mmc_priv(mmc);
1093 host->dev = &pdev->dev;
1094 host->cur_power_mode = MMC_POWER_UNDEFINED;
1095 host->alcor_pci = priv;
1097 /* make sure irqs are disabled */
1098 alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
1099 alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
1101 ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
1102 alcor_irq, alcor_irq_thread, IRQF_SHARED,
1103 DRV_NAME_ALCOR_PCI_SDMMC, host);
1105 if (ret) {
1106 dev_err(&pdev->dev, "Failed to get irq for data line\n");
1107 goto free_host;
1110 mutex_init(&host->cmd_mutex);
1111 INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
1113 alcor_init_mmc(host);
1114 alcor_hw_init(host);
1116 dev_set_drvdata(&pdev->dev, host);
1117 mmc_add_host(mmc);
1118 return 0;
1120 free_host:
1121 mmc_free_host(mmc);
1122 return ret;
1125 static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
1127 struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
1128 struct mmc_host *mmc = mmc_from_priv(host);
1130 if (cancel_delayed_work_sync(&host->timeout_work))
1131 alcor_request_complete(host, 0);
1133 alcor_hw_uninit(host);
1134 mmc_remove_host(mmc);
1135 mmc_free_host(mmc);
1137 return 0;
1140 #ifdef CONFIG_PM_SLEEP
1141 static int alcor_pci_sdmmc_suspend(struct device *dev)
1143 struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1145 if (cancel_delayed_work_sync(&host->timeout_work))
1146 alcor_request_complete(host, 0);
1148 alcor_hw_uninit(host);
1150 return 0;
1153 static int alcor_pci_sdmmc_resume(struct device *dev)
1155 struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1157 alcor_hw_init(host);
1159 return 0;
1161 #endif /* CONFIG_PM_SLEEP */
1163 static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
1164 alcor_pci_sdmmc_resume);
1166 static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
1168 .name = DRV_NAME_ALCOR_PCI_SDMMC,
1169 }, {
1170 /* sentinel */
1173 MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
1175 static struct platform_driver alcor_pci_sdmmc_driver = {
1176 .probe = alcor_pci_sdmmc_drv_probe,
1177 .remove = alcor_pci_sdmmc_drv_remove,
1178 .id_table = alcor_pci_sdmmc_ids,
1179 .driver = {
1180 .name = DRV_NAME_ALCOR_PCI_SDMMC,
1181 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1182 .pm = &alcor_mmc_pm_ops
1185 module_platform_driver(alcor_pci_sdmmc_driver);
1187 MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
1188 MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
1189 MODULE_LICENSE("GPL");