/*
 *  Copyright 2007, Google Inc.
 *  Copyright 2012, Intel Inc.
 *
 *  based on omap.c driver, which was
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/major.h>

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>

#include <asm/io.h>		/* readl()/writel() used by the register macros below */
#include <asm/scatterlist.h>

#include <asm/types.h>
#include <asm/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"

#define BUFFER_SIZE	16384

#define GOLDFISH_MMC_READ(host, addr)		(readl(host->reg_base + addr))
#define GOLDFISH_MMC_WRITE(host, addr, x)	(writel(x, host->reg_base + addr))
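/*
 * Register offsets of the emulated goldfish MMC controller, relative to
 * host->reg_base, followed by the interrupt-status and card-state bits
 * used with them.
 */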
enum {
	MMC_INT_STATUS		= 0x00,
	/* set this to enable IRQ */
	MMC_INT_ENABLE		= 0x04,
	/* set this to specify buffer address */
	MMC_SET_BUFFER		= 0x08,

	/* MMC command number */
	MMC_CMD			= 0x0C,

	/* MMC argument */
	MMC_ARG			= 0x10,

	/* MMC response (or R2 bits 0 - 31) */
	MMC_RESP_0		= 0x14,

	/* MMC R2 response bits 32 - 63 */
	MMC_RESP_1		= 0x18,

	/* MMC R2 response bits 64 - 95 */
	MMC_RESP_2		= 0x1C,

	/* MMC R2 response bits 96 - 127 */
	MMC_RESP_3		= 0x20,

	MMC_BLOCK_LENGTH	= 0x24,
	MMC_BLOCK_COUNT		= 0x28,

	/* MMC state flags */
	MMC_STATE		= 0x2C,

	/* MMC_INT_STATUS bits */
	MMC_STAT_END_OF_CMD	= 1U << 0,
	MMC_STAT_END_OF_DATA	= 1U << 1,
	MMC_STAT_STATE_CHANGE	= 1U << 2,
	MMC_STAT_CMD_TIMEOUT	= 1U << 3,

	/* MMC_STATE bits */
	MMC_STATE_INSERTED	= 1U << 0,
	MMC_STATE_READ_ONLY	= 1U << 1,
};
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3
struct goldfish_mmc_host {
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct mmc_host		*mmc;
	struct device		*dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	void __iomem		*virt_base;
	unsigned int		phys_base;
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;

	unsigned int		sg_len;
	unsigned		dma_done:1;
	unsigned		dma_in_use:1;

	void __iomem		*reg_base;
};
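/* The emulated card has no physical cover switch; it is reported as always closed. */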
static inline int
goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
{
	return 0;
}
static ssize_t
goldfish_mmc_show_cover_switch(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct goldfish_mmc_host *host = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
		       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
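/*
 * Build and issue a command: the command register packs the opcode in the
 * low byte, the response type at bit 8 and the command type at bit 12; the
 * extra flag bits set below (open-drain, busy, read direction) follow the
 * OMAP register layout this driver was derived from.
 */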
static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc),
			"Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	else
		cmdtype = OMAP_MMC_CMDTYPE_AC;

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
	GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}
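/*
 * Complete a data transfer.  On the pseudo-DMA path a read is finished by
 * copying the received bytes out of the shared buffer into the scatterlist
 * before the mapping is torn down.
 */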
static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
				   struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		if (data->flags & MMC_DATA_WRITE)
			dma_data_dir = DMA_TO_DEVICE;
		else
			dma_data_dir = DMA_FROM_DEVICE;

		if (dma_data_dir == DMA_FROM_DEVICE) {
			/*
			 * We don't really have DMA, so we need
			 * to copy from our platform driver buffer
			 */
			uint8_t *dest = (uint8_t *)sg_virt(data->sg);
			memcpy(dest, host->virt_base, data->sg->length);
		}

		host->data->bytes_xfered += data->sg->length;

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
			     dma_data_dir);
	}

	host->data = NULL;
	host->sg_len = 0;

	/*
	 * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	goldfish_mmc_start_command(host, data->stop);
}
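/*
 * End-of-data handling: without pseudo-DMA the transfer completes right
 * away; with it, completion waits until the IRQ handler has set dma_done.
 */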
static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
				     struct mmc_data *data)
{
	if (!host->dma_in_use) {
		goldfish_mmc_xfer_done(host, data);
		return;
	}
	if (host->dma_done)
		goldfish_mmc_xfer_done(host, data);
}
static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
				  struct mmc_command *cmd)
{
	host->cmd = NULL;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				GOLDFISH_MMC_READ(host, MMC_RESP_0);
			cmd->resp[2] =
				GOLDFISH_MMC_READ(host, MMC_RESP_1);
			cmd->resp[1] =
				GOLDFISH_MMC_READ(host, MMC_RESP_2);
			cmd->resp[0] =
				GOLDFISH_MMC_READ(host, MMC_RESP_3);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				GOLDFISH_MMC_READ(host, MMC_RESP_0);
		}
	}

	if (host->data == NULL || cmd->error) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, cmd->mrq);
	}
}
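/*
 * Interrupt handler: acknowledge every pending MMC_INT_STATUS bit, then
 * dispatch command completion, data completion, command timeouts and
 * card-detect state changes collected from those bits.
 */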
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
	struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
	u16 status;
	int end_command = 0;
	int end_transfer = 0;
	int transfer_error = 0;
	int state_changed = 0;
	int cmd_timeout = 0;

	while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
		GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

		if (status & MMC_STAT_END_OF_CMD)
			end_command = 1;

		if (status & MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & MMC_STAT_STATE_CHANGE)
			state_changed = 1;

		if (status & MMC_STAT_CMD_TIMEOUT) {
			end_command = 0;
			cmd_timeout = 1;
		}
	}

	if (cmd_timeout) {
		struct mmc_request *mrq = host->mrq;
		mrq->cmd->error = -ETIMEDOUT;
		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}

	if (end_command)
		goldfish_mmc_cmd_done(host, host->cmd);

	if (transfer_error)
		goldfish_mmc_xfer_done(host, host->data);
	else if (end_transfer) {
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	} else if (host->data != NULL) {
		/*
		 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
		 * during device initialization, cases where host->data is
		 * non-null but end_transfer is false would occur. Doing
		 * nothing in such cases results in no further interrupts,
		 * and initialization failure.
		 * TODO -- find the real cause.
		 */
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	}

	if (state_changed) {
		u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
		pr_info("%s: Card detect now %d\n", __func__,
			(state & MMC_STATE_INSERTED));
		mmc_detect_change(host->mmc, 0);
	}

	if (!end_command && !end_transfer &&
	    !transfer_error && !state_changed && !cmd_timeout) {
		status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
		dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
		if (status != 0) {
			GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
			GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
		}
	}

	return IRQ_HANDLED;
}
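/*
 * Program block length/count for the request and stage write data through
 * the coherent bounce buffer that the emulated device actually transfers
 * to and from.
 */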
static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
				      struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int block_size;
	unsigned sg_len;
	enum dma_data_direction dma_data_dir;

	host->data = data;
	if (data == NULL) {
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
		host->dma_in_use = 0;
		return;
	}

	block_size = data->blksz;

	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

	/*
	 * Cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	if (data->flags & MMC_DATA_WRITE)
		dma_data_dir = DMA_TO_DEVICE;
	else
		dma_data_dir = DMA_FROM_DEVICE;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				  sg_len, dma_data_dir);
	host->dma_done = 0;
	host->dma_in_use = 1;

	if (dma_data_dir == DMA_TO_DEVICE) {
		/*
		 * We don't really have DMA, so we need to copy to our
		 * platform driver buffer
		 */
		const uint8_t *src = (uint8_t *)sg_virt(data->sg);
		memcpy(host->virt_base, src, data->sg->length);
	}
}
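/*
 * One request at a time: prepare any data phase, then fire the command.
 * The SDIO probe command is failed on purpose so the emulated card is never
 * mis-detected as an SDIO card.
 */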
static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	host->mrq = req;
	goldfish_mmc_prepare_data(host, req);
	goldfish_mmc_start_command(host, req->cmd);

	/*
	 * This is to avoid accidentally being detected as an SDIO card
	 * in mmc_attach_sdio().
	 */
	if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
	    req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
		req->cmd->error = -EINVAL;
}
static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	host->bus_mode = ios->bus_mode;
	host->hw_bus_mode = host->bus_mode;
}
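/* Write-protect state is exposed by the device in the MMC_STATE register. */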
static int goldfish_mmc_get_ro(struct mmc_host *mmc)
{
	uint32_t state;
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	state = GOLDFISH_MMC_READ(host, MMC_STATE);
	return ((state & MMC_STATE_READ_ONLY) != 0);
}
static const struct mmc_host_ops goldfish_mmc_ops = {
	.request	= goldfish_mmc_request,
	.set_ios	= goldfish_mmc_set_ios,
	.get_ro		= goldfish_mmc_get_ro,
};
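/*
 * Probe: map the register window, allocate the BUFFER_SIZE coherent bounce
 * buffer and hand its bus address to the device, then register the MMC host
 * and enable the interrupt sources handled above.
 */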
static int goldfish_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct goldfish_mmc_host *host = NULL;
	struct resource *res;
	dma_addr_t buf_addr;
	int ret = 0;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
	if (mmc == NULL) {
		ret = -ENOMEM;
		goto err_alloc_host_failed;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
	host->reg_base = ioremap(res->start, res->end - res->start + 1);
	if (host->reg_base == NULL) {
		ret = -ENOMEM;
		goto ioremap_failed;
	}

	host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					     &buf_addr, GFP_KERNEL);
	if (host->virt_base == NULL) {
		ret = -ENOMEM;
		goto dma_alloc_failed;
	}
	host->phys_base = buf_addr;

	host->id = pdev->id;
	host->irq = irq;

	mmc->ops = &goldfish_mmc_ops;
	mmc->f_min = 400000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */
	mmc->max_req_size = BUFFER_SIZE;
	mmc->max_seg_size = mmc->max_req_size;

	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
		goto err_request_irq_failed;
	}

	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
	if (ret)
		dev_warn(mmc_dev(host->mmc),
			 "Unable to create sysfs attributes\n");

	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
			   MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
			   MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

	mmc_add_host(mmc);
	return 0;

err_request_irq_failed:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
			  host->phys_base);
dma_alloc_failed:
	iounmap(host->reg_base);
ioremap_failed:
	mmc_free_host(host->mmc);
err_alloc_host_failed:
	return ret;
}
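/* Remove: undo the probe steps in reverse order. */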
static int goldfish_mmc_remove(struct platform_device *pdev)
{
	struct goldfish_mmc_host *host = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	BUG_ON(host == NULL);

	mmc_remove_host(host->mmc);
	free_irq(host->irq, host);
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
	iounmap(host->reg_base);
	mmc_free_host(host->mmc);

	return 0;
}
static struct platform_driver goldfish_mmc_driver = {
	.probe		= goldfish_mmc_probe,
	.remove		= goldfish_mmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

module_platform_driver(goldfish_mmc_driver);

MODULE_LICENSE("GPL v2");