/*
 *  Copyright 2007, Google Inc.
 *  Copyright 2012, Intel Inc.
 *
 *  based on omap.c driver, which was
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/major.h>

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>

#include <asm/types.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"

#define BUFFER_SIZE 16384

#define GOLDFISH_MMC_READ(host, addr)     (readl(host->reg_base + addr))
#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
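
/*
 * All device access goes through a small MMIO register file: the
 * GOLDFISH_MMC_* macros above wrap readl()/writel() on the ioremap()'d
 * register base, at the offsets listed in the enum below.
 */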

enum {
        /* status register */
        MMC_INT_STATUS          = 0x00,
        /* set this to enable IRQ */
        MMC_INT_ENABLE          = 0x04,
        /* set this to specify buffer address */
        MMC_SET_BUFFER          = 0x08,

        /* MMC command number */
        MMC_CMD                 = 0x0C,

        /* MMC argument */
        MMC_ARG                 = 0x10,

        /* MMC response (or R2 bits 0 - 31) */
        MMC_RESP_0              = 0x14,

        /* MMC R2 response bits 32 - 63 */
        MMC_RESP_1              = 0x18,

        /* MMC R2 response bits 64 - 95 */
        MMC_RESP_2              = 0x1C,

        /* MMC R2 response bits 96 - 127 */
        MMC_RESP_3              = 0x20,

        MMC_BLOCK_LENGTH        = 0x24,
        MMC_BLOCK_COUNT         = 0x28,

        /* MMC state flags */
        MMC_STATE               = 0x2C,

        /* MMC_INT_STATUS bits */
        MMC_STAT_END_OF_CMD     = 1U << 0,
        MMC_STAT_END_OF_DATA    = 1U << 1,
        MMC_STAT_STATE_CHANGE   = 1U << 2,
        MMC_STAT_CMD_TIMEOUT    = 1U << 3,

        /* MMC_STATE flags */
        MMC_STATE_INSERTED      = 1U << 0,
        MMC_STATE_READ_ONLY     = 1U << 1,
};

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC     0
#define OMAP_MMC_CMDTYPE_BCR    1
#define OMAP_MMC_CMDTYPE_AC     2
#define OMAP_MMC_CMDTYPE_ADTC   3
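
/*
 * These are the MMC spec's four command classes: BC = broadcast without
 * response, BCR = broadcast with response, AC = addressed (point-to-point)
 * command, ADTC = addressed data transfer command. The OMAP_ prefix is
 * inherited from the omap.c driver this file is based on.
 */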

struct goldfish_mmc_host {
        struct mmc_request      *mrq;
        struct mmc_command      *cmd;
        struct mmc_data         *data;
        struct mmc_host         *mmc;
        struct device           *dev;
        unsigned char           id; /* 16xx chips have 2 MMC blocks (comment inherited from omap.c) */
        void                    *virt_base;     /* CPU view of the bounce buffer */
        dma_addr_t              phys_base;      /* device view of the bounce buffer */
        int                     irq;
        unsigned char           bus_mode;
        unsigned char           hw_bus_mode;

        unsigned int            sg_len;
        unsigned                dma_done:1;
        unsigned                dma_in_use:1;

        void __iomem            *reg_base;
};
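
/*
 * There is no real DMA engine behind this device: virt_base/phys_base
 * describe a single BUFFER_SIZE bounce buffer, announced to the emulated
 * device via MMC_SET_BUFFER, through which every data transfer is staged.
 */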

static inline int
goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
{
        return 0;
}

static ssize_t
goldfish_mmc_show_cover_switch(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct goldfish_mmc_host *host = dev_get_drvdata(dev);

        return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
                       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
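
/*
 * Build the command register value (opcode in the low byte, the response
 * type at bit 8 and the command type at bit 12, plus open-drain/busy/read
 * flag bits) and write MMC_ARG then MMC_CMD; the emulated device executes
 * the command and signals completion through MMC_INT_STATUS.
 */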
static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host,
                           struct mmc_command *cmd)
{
        u32 cmdreg;
        u32 resptype;
        u32 cmdtype;

        host->cmd = cmd;

        resptype = 0;
        cmdtype = 0;

        /* Our hardware needs to know exact type */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
                /* resp 1, 1b, 6, 7 */
                resptype = 1;
                break;
        case MMC_RSP_R2:
                resptype = 2;
                break;
        case MMC_RSP_R3:
                resptype = 3;
                break;
        default:
                dev_err(mmc_dev(host->mmc),
                        "Invalid response type: %04x\n", mmc_resp_type(cmd));
                break;
        }

        if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
                cmdtype = OMAP_MMC_CMDTYPE_ADTC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
                cmdtype = OMAP_MMC_CMDTYPE_BC;
        else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
                cmdtype = OMAP_MMC_CMDTYPE_BCR;
        else
                cmdtype = OMAP_MMC_CMDTYPE_AC;

        cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

        if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
                cmdreg |= 1 << 6;       /* open-drain bus */

        if (cmd->flags & MMC_RSP_BUSY)
                cmdreg |= 1 << 11;      /* response with busy */

        if (host->data && !(host->data->flags & MMC_DATA_WRITE))
                cmdreg |= 1 << 15;      /* data read */

        GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
        GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}
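
/*
 * Completion side of the fake DMA: for reads, the data is sitting in the
 * bounce buffer once the device signals end-of-data, and is copied back
 * into the request's scatterlist here.
 */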
static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
                                   struct mmc_data *data)
{
        if (host->dma_in_use) {
                enum dma_data_direction dma_data_dir;

                dma_data_dir = mmc_get_dma_dir(data);

                if (dma_data_dir == DMA_FROM_DEVICE) {
                        /*
                         * We don't really have DMA, so we need
                         * to copy from our platform driver buffer
                         */
                        sg_copy_from_buffer(data->sg, 1, host->virt_base,
                                            data->sg->length);
                }
                host->data->bytes_xfered += data->sg->length;
                dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
                             dma_data_dir);
        }

        host->data = NULL;
        host->sg_len = 0;

        /*
         * NOTE: the MMC layer will sometimes poll-wait CMD13 next, issuing
         * dozens of requests until the card finishes writing data.
         * It'd be cheaper to just wait till an EOFB interrupt arrives...
         */

        if (!data->stop) {
                host->mrq = NULL;
                mmc_request_done(host->mmc, data->mrq);
                return;
        }

        goldfish_mmc_start_command(host, data->stop);
}

static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
                                     struct mmc_data *data)
{
        if (!host->dma_in_use) {
                goldfish_mmc_xfer_done(host, data);
                return;
        }
        if (host->dma_done)
                goldfish_mmc_xfer_done(host, data);
}
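
/*
 * A 136-bit R2 response is spread across the four response registers with
 * bits 0-31 in MMC_RESP_0, while the MMC core expects the most significant
 * word in resp[0] -- hence the reversed indexing below.
 */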
static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
                                  struct mmc_command *cmd)
{
        host->cmd = NULL;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* response type 2 */
                        cmd->resp[3] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                        cmd->resp[2] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_1);
                        cmd->resp[1] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_2);
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_3);
                } else {
                        /* response types 1, 1b, 3, 4, 5, 6 */
                        cmd->resp[0] =
                                GOLDFISH_MMC_READ(host, MMC_RESP_0);
                }
        }

        if (host->data == NULL || cmd->error) {
                host->mrq = NULL;
                mmc_request_done(host->mmc, cmd->mrq);
        }
}
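
/*
 * Interrupt handler: read and acknowledge MMC_INT_STATUS until it drains
 * to zero, remember what completed, then dispatch outside the loop.
 */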
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
        struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
        u16 status;
        int end_command = 0;
        int end_transfer = 0;
        int state_changed = 0;
        int cmd_timeout = 0;

        while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
                GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

                if (status & MMC_STAT_END_OF_CMD)
                        end_command = 1;

                if (status & MMC_STAT_END_OF_DATA)
                        end_transfer = 1;

                if (status & MMC_STAT_STATE_CHANGE)
                        state_changed = 1;

                if (status & MMC_STAT_CMD_TIMEOUT) {
                        end_command = 0;
                        cmd_timeout = 1;
                }
        }

        if (cmd_timeout) {
                struct mmc_request *mrq = host->mrq;

                mrq->cmd->error = -ETIMEDOUT;
                host->mrq = NULL;
                mmc_request_done(host->mmc, mrq);
        }

        if (end_command)
                goldfish_mmc_cmd_done(host, host->cmd);

        if (end_transfer) {
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        } else if (host->data != NULL) {
                /*
                 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
                 * during device initialization, cases where host->data is
                 * non-null but end_transfer is false would occur. Doing
                 * nothing in such cases results in no further interrupts,
                 * and initialization failure.
                 * TODO -- find the real cause.
                 */
                host->dma_done = 1;
                goldfish_mmc_end_of_data(host, host->data);
        }

        if (state_changed) {
                u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);

                pr_info("%s: Card detect now %d\n", __func__,
                        (state & MMC_STATE_INSERTED));
                mmc_detect_change(host->mmc, 0);
        }

        if (!end_command && !end_transfer && !state_changed && !cmd_timeout) {
                status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
                dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
                if (status != 0) {
                        GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
                        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
                }
        }

        return IRQ_HANDLED;
}
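
/*
 * Program the transfer geometry before the command is issued. The
 * BLOCK_COUNT/BLOCK_LENGTH registers hold value-minus-one, which is why
 * probe advertises 2048 ("11 bits (+1)") as the maximum for both.
 */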
static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
                                      struct mmc_request *req)
{
        struct mmc_data *data = req->data;
        int block_size;
        unsigned int sg_len;
        enum dma_data_direction dma_data_dir;

        host->data = data;
        if (data == NULL) {
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
                GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
                host->dma_in_use = 0;
                return;
        }

        block_size = data->blksz;

        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
        GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

        /*
         * Cope with calling layer confusion; it issues "single
         * block" writes using multi-block scatterlists.
         */
        sg_len = (data->blocks == 1) ? 1 : data->sg_len;

        dma_data_dir = mmc_get_dma_dir(data);

        host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
                                  sg_len, dma_data_dir);
        host->dma_done = 0;
        host->dma_in_use = 1;

        if (dma_data_dir == DMA_TO_DEVICE) {
                /*
                 * We don't really have DMA, so we need to copy to our
                 * platform driver buffer
                 */
                sg_copy_to_buffer(data->sg, 1, host->virt_base,
                                  data->sg->length);
        }
}
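
/*
 * Only one request may be in flight at a time (hence the WARN_ON):
 * data is staged first, then the command is started, and everything
 * completes later from the interrupt handler.
 */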
static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        WARN_ON(host->mrq != NULL);

        host->mrq = req;
        goldfish_mmc_prepare_data(host, req);
        goldfish_mmc_start_command(host, req->cmd);

        /*
         * This is to avoid accidentally being detected as an SDIO card
         * in mmc_attach_sdio().
         */
        if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
            req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
                req->cmd->error = -EINVAL;
}
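
/*
 * The emulated controller needs no clock, power or timing programming;
 * set_ios only records the requested bus mode, which start_command
 * consults for the open-drain flag.
 */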
static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        host->bus_mode = ios->bus_mode;
        host->hw_bus_mode = host->bus_mode;
}

static int goldfish_mmc_get_ro(struct mmc_host *mmc)
{
        u32 state;
        struct goldfish_mmc_host *host = mmc_priv(mmc);

        state = GOLDFISH_MMC_READ(host, MMC_STATE);
        return ((state & MMC_STATE_READ_ONLY) != 0);
}

static const struct mmc_host_ops goldfish_mmc_ops = {
        .request        = goldfish_mmc_request,
        .set_ios        = goldfish_mmc_set_ios,
        .get_ro         = goldfish_mmc_get_ro,
};
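
/*
 * Probe order: map the registers, allocate the bounce buffer, describe
 * the host's limits to the MMC core, request the IRQ, then point the
 * device at the buffer and unmask its interrupts before registering
 * the host.
 */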
static int goldfish_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct goldfish_mmc_host *host = NULL;
        struct resource *res;
        int ret = 0;
        int irq;
        dma_addr_t buf_addr;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (res == NULL || irq < 0)
                return -ENXIO;

        mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
        if (mmc == NULL) {
                ret = -ENOMEM;
                goto err_alloc_host_failed;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        dev_info(&pdev->dev, "mapping %lX to %lX\n",
                 (long)res->start, (long)res->end);
        host->reg_base = ioremap(res->start, resource_size(res));
        if (host->reg_base == NULL) {
                ret = -ENOMEM;
                goto ioremap_failed;
        }

        host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
                                             &buf_addr, GFP_KERNEL);
        if (host->virt_base == NULL) {
                ret = -ENOMEM;
                goto dma_alloc_failed;
        }
        host->phys_base = buf_addr;

        host->irq = irq;

        mmc->ops = &goldfish_mmc_ops;
        mmc->f_min = 400000;
        mmc->f_max = 24000000;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
        mmc->caps = MMC_CAP_4_BIT_DATA;

        /* Use scatterlist DMA to reduce per-transfer costs.
         * NOTE max_seg_size assumption that small blocks aren't
         * normally used (except e.g. for reading SD registers).
         */
        mmc->max_segs = 32;
        mmc->max_blk_size = 2048;       /* MMC_BLOCK_LENGTH is 11 bits (+1) */
        mmc->max_blk_count = 2048;      /* MMC_BLOCK_COUNT is 11 bits (+1) */
        mmc->max_req_size = BUFFER_SIZE;
        mmc->max_seg_size = mmc->max_req_size;

        ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
        if (ret) {
                dev_err(&pdev->dev, "failed to request goldfish MMC IRQ\n");
                goto err_request_irq_failed;
        }

        host->dev = &pdev->dev;
        platform_set_drvdata(pdev, host);

        ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
        if (ret)
                dev_warn(mmc_dev(host->mmc),
                         "Unable to create sysfs attributes\n");

        GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
        GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
                           MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
                           MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

        mmc_add_host(mmc);
        return 0;

err_request_irq_failed:
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
                          host->phys_base);
dma_alloc_failed:
        iounmap(host->reg_base);
ioremap_failed:
        mmc_free_host(host->mmc);
err_alloc_host_failed:
        return ret;
}
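
/*
 * Tear-down mirrors probe in reverse: the host is unregistered before
 * the IRQ, bounce buffer and register mapping it relies on are released.
 */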
static int goldfish_mmc_remove(struct platform_device *pdev)
{
        struct goldfish_mmc_host *host = platform_get_drvdata(pdev);

        BUG_ON(host == NULL);

        mmc_remove_host(host->mmc);
        free_irq(host->irq, host);
        dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
                          host->phys_base);
        iounmap(host->reg_base);
        mmc_free_host(host->mmc);

        return 0;
}

static struct platform_driver goldfish_mmc_driver = {
        .probe          = goldfish_mmc_probe,
        .remove         = goldfish_mmc_remove,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

module_platform_driver(goldfish_mmc_driver);

MODULE_LICENSE("GPL v2");