// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
 *
 * Driver for Alcor Micro AU6601 and AU6621 controllers
 */
/* Note: this driver was created without any documentation. It is based
 * on sniffing, testing and in some cases mimicking the original driver.
 * If you have documentation, or more experience with SD/MMC or reverse
 * engineering than me, please review this driver and question everything
 * I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

#include <linux/alcor_pci.h>

enum alcor_cookie {
	COOKIE_UNMAPPED,
	COOKIE_MAPPED,
};
struct alcor_pll_conf {
	unsigned int clk_src_freq;
	unsigned int clk_src_reg;
	unsigned int min_div;
	unsigned int max_div;
};
struct alcor_sdmmc_host {
	struct device *dev;
	struct alcor_pci_priv *alcor_pci;

	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int dma_on:1;

	struct mutex cmd_mutex;

	struct delayed_work timeout_work;

	struct sg_mapping_iter sg_miter;	/* SG state for PIO */
	struct scatterlist *sg;
	unsigned int blocks;		/* remaining PIO blocks */
	int sg_count;

	u32 irq_status_sd;
	unsigned char cur_power_mode;
};
static const struct alcor_pll_conf alcor_pll_cfg[] = {
	/* MHz,		CLK src,		min div, max div */
	{ 31250000,	AU6601_CLK_31_25_MHZ,	1,	511},
	{ 48000000,	AU6601_CLK_48_MHZ,	1,	511},
	{125000000,	AU6601_CLK_125_MHZ,	1,	511},
	{384000000,	AU6601_CLK_384_MHZ,	1,	511},
};
static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
			      u8 clear, u8 set)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u8 var;

	var = alcor_read8(priv, addr);
	var &= ~clear;
	var |= set;
	alcor_write8(priv, var, addr);
}
/* As soon as the interrupts are masked, some status updates may be missed. */
static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
}
static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
		      AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
		      AU6601_INT_OVER_CURRENT_ERR,
		      AU6601_REG_INT_ENABLE);
}
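/*
 * Reset the command and/or data state machine (AU6601_RESET_CMD and/or
 * AU6601_RESET_DATA in @val) and poll until the controller clears the
 * requested bits again; complain if it never does.
 */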
static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int i;

	alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
		     AU6601_REG_SW_RESET);
	for (i = 0; i < 100; i++) {
		if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
			return;
		udelay(100);
	}
	dev_err(host->dev, "%s: timeout\n", __func__);
}
/*
 * Perform DMA I/O of a single page.
 */
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	u32 addr;

	if (!host->sg_count)
		return;

	if (!host->sg) {
		dev_err(host->dev, "have blocks, but no SG\n");
		return;
	}

	if (!sg_dma_len(host->sg)) {
		dev_err(host->dev, "DMA SG len == 0\n");
		return;
	}

	addr = (u32)sg_dma_address(host->sg);

	alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
	host->sg = sg_next(host->sg);
	host->sg_count--;
}
static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = host->data;
	u8 ctrl = 0;

	if (data->flags & MMC_DATA_WRITE)
		ctrl |= AU6601_DATA_WRITE;

	if (data->host_cookie == COOKIE_MAPPED) {
		/*
		 * For DMA transfers, this function is called just once,
		 * at the start of the operation. The hardware can only
		 * perform DMA I/O on a single page at a time, so here
		 * we kick off the transfer with the first page, and expect
		 * subsequent pages to be transferred upon IRQ events
		 * indicating that the single-page DMA was completed.
		 */
		alcor_data_set_dma(host);
		ctrl |= AU6601_DATA_DMA_MODE;
		host->dma_on = 1;
		alcor_write32(priv, data->sg_count * 0x1000,
			      AU6601_REG_BLOCK_SIZE);
	} else {
		/*
		 * For PIO transfers, we break down each operation
		 * into several sector-sized transfers. When one sector has
		 * completed, the IRQ handler will call this function again
		 * to kick off the transfer of the next sector.
		 */
		alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
	}

	alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
		     AU6601_DATA_XFER_CTRL);
}
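/*
 * Move one block of PIO data through the 32-bit data port at
 * AU6601_REG_BUFFER. The length is presumably expected to be a multiple of
 * four bytes, since the buffer is accessed one 32-bit word at a time.
 */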
static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	size_t blksize, len;
	u8 *buf;

	if (host->dma_on) {
		dev_err(host->dev, "configured DMA but got PIO request.\n");
		return;
	}

	if (!!(host->data->flags & MMC_DATA_READ) != read) {
		dev_err(host->dev, "got unexpected direction %i != %i\n",
			!!(host->data->flags & MMC_DATA_READ), read);
	}

	if (!sg_miter_next(&host->sg_miter))
		return;

	blksize = host->data->blksz;
	len = min(host->sg_miter.length, blksize);

	dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
		read ? "read" : "write", blksize);

	host->sg_miter.consumed = len;
	host->blocks--;

	buf = host->sg_miter.addr;

	if (read)
		ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
	else
		iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);

	sg_miter_stop(&host->sg_miter);
}
static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
{
	unsigned int flags = SG_MITER_ATOMIC;
	struct mmc_data *data = host->data;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;
	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
static void alcor_prepare_data(struct alcor_sdmmc_host *host,
			       struct mmc_command *cmd)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct mmc_data *data = cmd->data;

	if (!data)
		return;

	host->data = data;
	host->data->bytes_xfered = 0;
	host->blocks = data->blocks;
	host->sg = data->sg;
	host->sg_count = data->sg_count;
	dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
		host->sg_count, host->blocks);

	if (data->host_cookie != COOKIE_MAPPED)
		alcor_prepare_sg_miter(host);

	alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}
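/*
 * Send a command to the card. The opcode is programmed with bit 6 set
 * (cmd->opcode | 0x40), which presumably corresponds to the start and
 * transmission bits of the SD command token; the response type selects how
 * many response bytes and which CRC handling the controller expects.
 */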
static void alcor_send_cmd(struct alcor_sdmmc_host *host,
			   struct mmc_command *cmd, bool set_timeout)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	unsigned long timeout = 0;
	u8 ctrl = 0;

	host->cmd = cmd;
	alcor_prepare_data(host, cmd);

	dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
		cmd->opcode, cmd->arg);
	alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
	alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		ctrl = AU6601_CMD_NO_RESP;
		break;
	case MMC_RSP_R1:
		ctrl = AU6601_CMD_6_BYTE_CRC;
		break;
	case MMC_RSP_R1B:
		ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
		break;
	case MMC_RSP_R2:
		ctrl = AU6601_CMD_17_BYTE_CRC;
		break;
	case MMC_RSP_R3:
		ctrl = AU6601_CMD_6_BYTE_WO_CRC;
		break;
	default:
		dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
			mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
		break;
	}

	if (set_timeout) {
		if (!cmd->data && cmd->busy_timeout)
			timeout = cmd->busy_timeout;
		else
			timeout = 10000;

		schedule_delayed_work(&host->timeout_work,
				      msecs_to_jiffies(timeout));
	}

	dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
	alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
		     AU6601_CMD_XFER_CTRL);
}
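/*
 * Hand the finished request back to the MMC core and clear the per-request
 * state. cancel_timeout tells us whether the software timeout work still
 * needs to be cancelled (it does not when we are called from the timeout
 * handler itself).
 */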
static void alcor_request_complete(struct alcor_sdmmc_host *host,
				   bool cancel_timeout)
{
	struct mmc_request *mrq;

	/*
	 * If this work gets rescheduled while running, it will
	 * be run again afterwards but without any active request.
	 */
	if (!host->mrq)
		return;

	if (cancel_timeout)
		cancel_delayed_work(&host->timeout_work);

	mrq = host->mrq;

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	host->dma_on = 0;

	mmc_request_done(mmc_from_priv(host), mrq);
}
static void alcor_finish_data(struct alcor_sdmmc_host *host)
{
	struct mmc_data *data;

	data = host->data;
	host->data = NULL;
	host->dma_on = 0;

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error || !host->mrq->sbc)) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error)
			alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

		alcor_unmask_sd_irqs(host);
		alcor_send_cmd(host, data->stop, false);
		return;
	}

	alcor_request_complete(host, 1);
}
static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	dev_dbg(host->dev, "ERR IRQ %x\n", intmask);

	if (host->cmd) {
		if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;
	}

	if (host->data) {
		if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
			host->data->error = -ETIMEDOUT;
		else
			host->data->error = -EILSEQ;
		host->data->bytes_xfered = 0;
	}

	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
	alcor_request_complete(host, 1);
}
static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	intmask &= AU6601_INT_CMD_END;

	if (!intmask)
		return 1;

	/* got CMD_END but no CMD is in progress; wake the thread and
	 * process the error
	 */
	if (!host->cmd)
		return 0;

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		struct mmc_command *cmd = host->cmd;

		cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
		dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
		if (host->cmd->flags & MMC_RSP_136) {
			cmd->resp[1] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP1);
			cmd->resp[2] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP2);
			cmd->resp[3] =
				alcor_read32be(priv, AU6601_REG_CMD_RSP3);
			dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
				cmd->resp[1], cmd->resp[2], cmd->resp[3]);
		}
	}

	host->cmd->error = 0;

	/* Processed actual command. */
	if (!host->data)
		return 0;

	alcor_trigger_data_transfer(host);
	host->cmd = NULL;
	return 1;
}
static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_CMD_END;

	if (!intmask)
		return;

	if (!host->cmd && intmask & AU6601_INT_CMD_END) {
		dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
			intmask);
	}

	/* Processed actual command. */
	if (!host->data)
		alcor_request_complete(host, 1);
	else
		alcor_trigger_data_transfer(host);
}
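/*
 * Data interrupt handling, hard IRQ part. READ_BUF_RDY/WRITE_BUF_RDY drive
 * the PIO path one block at a time, while DMA_END advances the single-page
 * DMA engine to the next scatterlist segment. The return value tells the
 * hard IRQ handler whether the event was handled completely here.
 */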
static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
	u32 tmp;

	intmask &= AU6601_INT_DATA_MASK;

	/* nothing here to do */
	if (!intmask)
		return 1;

	/* we were too fast and got DATA_END after it was processed?
	 * let's ignore it for now.
	 */
	if (!host->data && intmask == AU6601_INT_DATA_END)
		return 1;

	/* looks like an error, so let's handle it. */
	if (!host->data)
		return 0;

	tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			 | AU6601_INT_DMA_END);
	switch (tmp) {
	case 0:
		break;
	case AU6601_INT_READ_BUF_RDY:
		alcor_trf_block_pio(host, true);
		break;
	case AU6601_INT_WRITE_BUF_RDY:
		alcor_trf_block_pio(host, false);
		break;
	case AU6601_INT_DMA_END:
		alcor_data_set_dma(host);
		break;
	default:
		dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
		break;
	}

	if (intmask & AU6601_INT_DATA_END) {
		if (!host->dma_on && host->blocks) {
			alcor_trigger_data_transfer(host);
			return 1;
		} else {
			return 0;
		}
	}

	return 1;
}
static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
	intmask &= AU6601_INT_DATA_MASK;

	if (!intmask)
		return;

	if (!host->data) {
		dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
			intmask);
		alcor_reset(host, AU6601_RESET_DATA);
		return;
	}

	if (alcor_data_irq_done(host, intmask))
		return;

	if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
	    (host->dma_on && !host->sg_count))
		alcor_finish_data(host);
}
static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
	dev_dbg(host->dev, "card %s\n",
		intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");

	if (host->mrq) {
		dev_dbg(host->dev, "cancel all pending tasks.\n");

		if (host->data)
			host->data->error = -ENOMEDIUM;

		if (host->cmd)
			host->cmd->error = -ENOMEDIUM;
		else
			host->mrq->cmd->error = -ENOMEDIUM;

		alcor_request_complete(host, 1);
	}

	mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
}
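/*
 * Threaded half of the interrupt handler. The hard IRQ handler below stores
 * the raw status word in host->irq_status_sd, masks the SD interrupts and
 * wakes this thread, which then serializes against the rest of the driver
 * via cmd_mutex and re-enables the interrupts when it is done.
 */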
static irqreturn_t alcor_irq_thread(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	irqreturn_t ret = IRQ_HANDLED;
	u32 intmask, tmp;

	mutex_lock(&host->cmd_mutex);

	intmask = host->irq_status_sd;

	if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
		dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
		ret = IRQ_NONE;
		goto exit;
	}

	tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
	if (tmp & AU6601_INT_ERROR_MASK)
		alcor_err_irq(host, tmp);
	else {
		alcor_cmd_irq_thread(host, tmp);
		alcor_data_irq_thread(host, tmp);
	}
	intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);

	if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
		alcor_cd_irq(host, intmask);
		intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
	}

	if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
		dev_warn(host->dev,
			 "warning: over current detected!\n");
		intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
	}

	if (intmask)
		dev_dbg(host->dev, "got unhandled IRQ: 0x%04x\n", intmask);

exit:
	mutex_unlock(&host->cmd_mutex);
	alcor_unmask_sd_irqs(host);
	return ret;
}
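/*
 * Hard IRQ handler. It acknowledges the status register and handles the
 * simple, latency-sensitive events (PIO buffer ready, single-page DMA end,
 * command end) directly; everything else is deferred to the IRQ thread.
 */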
static irqreturn_t alcor_irq(int irq, void *d)
{
	struct alcor_sdmmc_host *host = d;
	struct alcor_pci_priv *priv = host->alcor_pci;
	irqreturn_t ret = IRQ_HANDLED;
	u32 status, tmp;
	int cmd_done, data_done;

	status = alcor_read32(priv, AU6601_REG_INT_STATUS);
	if (!status)
		return IRQ_NONE;

	alcor_write32(priv, status, AU6601_REG_INT_STATUS);

	tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
			| AU6601_INT_DATA_END | AU6601_INT_DMA_END
			| AU6601_INT_CMD_END);
	if (tmp == status) {
		cmd_done = alcor_cmd_irq_done(host, tmp);
		data_done = alcor_data_irq_done(host, tmp);
		/* use fast path for simple tasks */
		if (cmd_done && data_done)
			return ret;
	}

	host->irq_status_sd = status;
	ret = IRQ_WAKE_THREAD;
	alcor_mask_sd_irqs(host);

	return ret;
}
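/*
 * Pick the PLL source and divider that give the closest match to the
 * requested clock. For example, for a 400 kHz target the 48 MHz source
 * divided by DIV_ROUND_UP(48000000, 400000) = 120 is an exact hit, while
 * the 31.25 MHz source would only get within a few kHz.
 */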
static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	int i, diff = 0x7fffffff, tmp_clock = 0;
	u16 clk_src = 0;
	u8 clk_div = 0;

	if (clock == 0) {
		alcor_write16(priv, 0, AU6601_CLK_SELECT);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
		unsigned int tmp_div, tmp_diff;
		const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];

		tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
		if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
			continue;

		tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
		tmp_diff = abs(clock - tmp_clock);

		if (tmp_diff < diff) {
			diff = tmp_diff;
			clk_src = cfg->clk_src_reg;
			clk_div = tmp_div;
		}
	}

	clk_src |= ((clk_div - 1) << 8);
	clk_src |= AU6601_CLK_ENABLE;

	dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
		clock, tmp_clock, clk_div, clk_src);

	alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
}
static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);

	if (ios->timing == MMC_TIMING_LEGACY) {
		alcor_rmw8(host, AU6601_CLK_DELAY,
			   AU6601_CLK_POSITIVE_EDGE_ALL, 0);
	} else {
		alcor_rmw8(host, AU6601_CLK_DELAY,
			   0, AU6601_CLK_POSITIVE_EDGE_ALL);
	}
}
static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;

	if (ios->bus_width == MMC_BUS_WIDTH_1) {
		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
			     AU6601_REG_BUS_CTRL);
	} else {
		dev_err(host->dev, "Unknown BUS mode\n");
	}
}
static int alcor_card_busy(struct mmc_host *mmc)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;
	u8 status;

	/* Check whether dat[0:3] low */
	status = alcor_read8(priv, AU6601_DATA_PIN_STATE);

	return !(status & AU6601_BUS_STAT_DAT_MASK);
}
static int alcor_get_cd(struct mmc_host *mmc)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;
	u8 detect;

	detect = alcor_read8(priv, AU6601_DETECT_STATUS)
		 & AU6601_DETECT_STATUS_M;
	/* check if card is present then send command and data */
	return (detect == AU6601_SD_DETECTED);
}
static int alcor_get_ro(struct mmc_host *mmc)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;
	u8 status;

	/* get write protect pin status */
	status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);

	return !!(status & AU6601_SD_CARD_WP);
}
static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);

	mutex_lock(&host->cmd_mutex);

	host->mrq = mrq;

	/* check if card is present then send command and data */
	if (alcor_get_cd(mmc))
		alcor_send_cmd(host, mrq->cmd, true);
	else {
		mrq->cmd->error = -ENOMEDIUM;
		alcor_request_complete(host, 1);
	}

	mutex_unlock(&host->cmd_mutex);
}
static void alcor_pre_req(struct mmc_host *mmc,
			  struct mmc_request *mrq)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!data || !cmd)
		return;

	data->host_cookie = COOKIE_UNMAPPED;

	/* FIXME: looks like the DMA engine works only with CMD18 */
	if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
			&& cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
		return;
	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. A future improvement
	 * could be made to use temporary DMA bounce-buffers when these
	 * requirements are not met.
	 *
	 * Also, we don't bother with all the DMA setup overhead for
	 * short transfers.
	 */
	if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
		return;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
			return;
	}

	/* This data might be unmapped at this time */

	sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len)
		data->host_cookie = COOKIE_MAPPED;

	data->sg_count = sg_len;
}
static void alcor_post_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   int err)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	if (data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	}

	data->host_cookie = COOKIE_UNMAPPED;
}
static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);
	struct alcor_pci_priv *priv = host->alcor_pci;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		alcor_set_clock(host, ios->clock);
		/* set all pins to input */
		alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
		alcor_write8(priv, 0, AU6601_POWER_CONTROL);
		break;
	case MMC_POWER_UP:
		break;
	case MMC_POWER_ON:
		/* This is the trickiest part. The order and timing of the
		 * instructions seem to play an important role. Any change may
		 * confuse the internal state engine of this HW.
		 * FIXME: If we ever get access to documentation, then this
		 * part should be reviewed again.
		 */

		/* enable SD card mode */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_ACTIVE_CTRL);
		/* set signal voltage to 3.3V */
		alcor_write8(priv, 0, AU6601_OPT);
		/* no documentation about clk delay, for now just try to mimic
		 * the original driver.
		 */
		alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
		/* set BUS width to 1 bit */
		alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
		/* set CLK first time */
		alcor_set_clock(host, ios->clock);
		/* power on the SD card */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_POWER_CONTROL);
		/* wait until the CLK will get stable */
		mdelay(20);
		/* set CLK again, mimic original driver. */
		alcor_set_clock(host, ios->clock);

		/* enable output */
		alcor_write8(priv, AU6601_SD_CARD,
			     AU6601_OUTPUT_ENABLE);
		/* The clk will not work on au6621. We need to trigger a data
		 * transfer.
		 */
		alcor_write8(priv, AU6601_DATA_WRITE,
			     AU6601_DATA_XFER_CTRL);
		/* configure timeout. Not clear what exactly it means. */
		alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
		break;
	default:
		dev_err(host->dev, "Unknown power parameter\n");
	}
}
static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);

	mutex_lock(&host->cmd_mutex);

	dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
		ios->bus_width, ios->power_mode);

	if (ios->power_mode != host->cur_power_mode) {
		alcor_set_power_mode(mmc, ios);
		host->cur_power_mode = ios->power_mode;
	} else {
		alcor_set_timing(mmc, ios);
		alcor_set_bus_width(mmc, ios);
		alcor_set_clock(host, ios->clock);
	}

	mutex_unlock(&host->cmd_mutex);
}
static int alcor_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct alcor_sdmmc_host *host = mmc_priv(mmc);

	mutex_lock(&host->cmd_mutex);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
		break;
	default:
		/* No signal voltage switch required */
		break;
	}

	mutex_unlock(&host->cmd_mutex);
	return 0;
}
static const struct mmc_host_ops alcor_sdc_ops = {
	.card_busy	= alcor_card_busy,
	.get_cd		= alcor_get_cd,
	.get_ro		= alcor_get_ro,
	.post_req	= alcor_post_req,
	.pre_req	= alcor_pre_req,
	.request	= alcor_request,
	.set_ios	= alcor_set_ios,
	.start_signal_voltage_switch = alcor_signal_voltage_switch,
};
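/*
 * Software timeout fallback, scheduled by alcor_send_cmd(). If the hardware
 * never raises a completion or error interrupt, give up on the request and
 * reset the controller state machines.
 */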
static void alcor_timeout_timer(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
						     timeout_work);
	mutex_lock(&host->cmd_mutex);

	dev_dbg(host->dev, "triggered timeout\n");
	if (host->mrq) {
		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");

		if (host->data) {
			host->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;
		}

		alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
		alcor_request_complete(host, 0);
	}

	mutex_unlock(&host->cmd_mutex);
}
static void alcor_hw_init(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;
	struct alcor_dev_cfg *cfg = priv->cfg;

	/* FIXME: This part mimics the HW init of the original driver.
	 * If we ever get access to documentation, then this part
	 * should be reviewed again.
	 */

	/* reset command state engine */
	alcor_reset(host, AU6601_RESET_CMD);

	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
	/* enable sd card mode */
	alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);

	/* set BUS width to 1 bit */
	alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);

	/* reset data state engine */
	alcor_reset(host, AU6601_RESET_DATA);
	/* Not sure if a voodoo with AU6601_DMA_BOUNDARY is really needed */
	alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);

	alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
	/* not clear what we are doing here. */
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
	alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
	alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);

	/* for 6601 - dma_boundary; for 6621 - dma_page_cnt
	 * exact meaning of this register is not clear.
	 */
	alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);

	/* make sure all pins are set to input and VDD is off */
	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
	/* now we should be safe to enable IRQs */
	alcor_unmask_sd_irqs(host);
}
static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
{
	struct alcor_pci_priv *priv = host->alcor_pci;

	alcor_mask_sd_irqs(host);
	alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

	alcor_write8(priv, 0, AU6601_DETECT_STATUS);

	alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
	alcor_write8(priv, 0, AU6601_POWER_CONTROL);

	alcor_write8(priv, 0, AU6601_OPT);
}
static void alcor_init_mmc(struct alcor_sdmmc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);

	mmc->f_min = AU6601_MIN_CLOCK;
	mmc->f_max = AU6601_MAX_CLOCK;
	mmc->ocr_avail = MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
		| MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
		| MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
	mmc->caps2 = MMC_CAP2_NO_SDIO;
	mmc->ops = &alcor_sdc_ops;

	/* The hardware does DMA data transfer of 4096 bytes to/from a single
	 * buffer address. Scatterlists are not supported at the hardware
	 * level, however we can work with them at the driver level,
	 * provided that each segment is exactly 4096 bytes in size.
	 * Upon DMA completion of a single segment (signalled via IRQ), we
	 * immediately proceed to transfer the next segment from the
	 * scatterlist.
	 *
	 * The overall request is limited to 240 sectors, matching the
	 * original vendor driver.
	 */
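	/* With the MMC core's default 512 byte max_blk_size, the 240 block
	 * cap below works out to 240 * 512 = 122880 bytes (120 KiB) per
	 * request.
	 */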
	mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
	mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
	mmc->max_blk_count = 240;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	dma_set_max_seg_size(host->dev, mmc->max_seg_size);
}
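/*
 * This platform device is created by the parent alcor_pci driver, which
 * probes the PCI function and hands us its alcor_pci_priv via platform
 * data; all register access goes through that shared structure.
 */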
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
{
	struct alcor_pci_priv *priv = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct alcor_sdmmc_host *host;
	int ret;

	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Can't allocate MMC\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);
	host->dev = &pdev->dev;
	host->cur_power_mode = MMC_POWER_UNDEFINED;
	host->alcor_pci = priv;

	/* make sure irqs are disabled */
	alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
	alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);

	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
			alcor_irq, alcor_irq_thread, IRQF_SHARED,
			DRV_NAME_ALCOR_PCI_SDMMC, host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get irq for data line\n");
		goto free_host;
	}

	mutex_init(&host->cmd_mutex);
	INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);

	alcor_init_mmc(host);
	alcor_hw_init(host);

	dev_set_drvdata(&pdev->dev, host);
	ret = mmc_add_host(mmc);
	if (ret)
		goto free_host;

	return 0;

free_host:
	mmc_free_host(mmc);
	return ret;
}
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
	struct mmc_host *mmc = mmc_from_priv(host);

	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);
	mmc_remove_host(mmc);
	mmc_free_host(mmc);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int alcor_pci_sdmmc_suspend(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	if (cancel_delayed_work_sync(&host->timeout_work))
		alcor_request_complete(host, 0);

	alcor_hw_uninit(host);

	return 0;
}

static int alcor_pci_sdmmc_resume(struct device *dev)
{
	struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

	alcor_hw_init(host);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
			 alcor_pci_sdmmc_resume);
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
	{
		.name = DRV_NAME_ALCOR_PCI_SDMMC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
static struct platform_driver alcor_pci_sdmmc_driver = {
	.probe		= alcor_pci_sdmmc_drv_probe,
	.remove		= alcor_pci_sdmmc_drv_remove,
	.id_table	= alcor_pci_sdmmc_ids,
	.driver		= {
		.name		= DRV_NAME_ALCOR_PCI_SDMMC,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.pm		= &alcor_mmc_pm_ops,
	},
};
module_platform_driver(alcor_pci_sdmmc_driver);
MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");