// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright 2007, Google Inc.
 *  Copyright 2012, Intel Inc.
 *
 *  based on omap.c driver, which was
 *  Copyright (C) 2004 Nokia Corporation
 *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 *  Other hacks (DMA, SD, etc) by David Brownell
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/major.h>

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>

#include <asm/io.h>
#include <asm/irq.h>

#include <asm/types.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"

#define BUFFER_SIZE 16384

#define GOLDFISH_MMC_READ(host, addr)     (readl(host->reg_base + addr))
#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
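/*
 * All device state lives in a small file of 32-bit MMIO registers; every
 * access goes through the two helpers above. The offsets and bit meanings
 * in the enum below are inferred from how this driver uses them -- the
 * goldfish virtual device has no separate datasheet here.
 */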
enum {
	/* status register */
	MMC_INT_STATUS		= 0x00,
	/* set this to enable IRQ */
	MMC_INT_ENABLE		= 0x04,
	/* set this to specify buffer address */
	MMC_SET_BUFFER		= 0x08,

	/* MMC command number */
	MMC_CMD			= 0x0C,

	/* MMC argument */
	MMC_ARG			= 0x10,

	/* MMC response (or R2 bits 0 - 31) */
	MMC_RESP_0		= 0x14,

	/* MMC R2 response bits 32 - 63 */
	MMC_RESP_1		= 0x18,

	/* MMC R2 response bits 64 - 95 */
	MMC_RESP_2		= 0x1C,

	/* MMC R2 response bits 96 - 127 */
	MMC_RESP_3		= 0x20,

	MMC_BLOCK_LENGTH	= 0x24,
	MMC_BLOCK_COUNT		= 0x28,

	/* MMC state flags */
	MMC_STATE		= 0x2C,

	/* MMC_INT_STATUS bits */
	MMC_STAT_END_OF_CMD	= 1U << 0,
	MMC_STAT_END_OF_DATA	= 1U << 1,
	MMC_STAT_STATE_CHANGE	= 1U << 2,
	MMC_STAT_CMD_TIMEOUT	= 1U << 3,

	/* MMC_STATE bits */
	MMC_STATE_INSERTED	= 1U << 0,
	MMC_STATE_READ_ONLY	= 1U << 1,
};
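/*
 * Rough programming model, reconstructed from how the code below drives
 * these registers (a sketch, not authoritative documentation):
 *   1. probe writes the bounce-buffer bus address to MMC_SET_BUFFER and
 *      enables the four MMC_STAT_* sources via MMC_INT_ENABLE;
 *   2. each command writes MMC_ARG, then MMC_CMD, which starts execution;
 *   3. the device raises MMC_STAT_END_OF_CMD / MMC_STAT_END_OF_DATA in
 *      MMC_INT_STATUS, and the IRQ handler acknowledges by writing the
 *      status bits back.
 */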
/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3
struct goldfish_mmc_host {
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct device		*dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	void			*virt_base;
	unsigned int		phys_base;
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;

	unsigned int		sg_len;
	unsigned		dma_done:1;
	unsigned		dma_in_use:1;

	void __iomem		*reg_base;
};
static inline int
goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
{
	return 0;
}
static ssize_t
goldfish_mmc_show_cover_switch(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct goldfish_mmc_host *host = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
		       "closed");
}
static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
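/*
 * goldfish_mmc_start_command() packs the opcode, response type and command
 * type into a single MMC_CMD write. The layout below is inferred from the
 * shifts and flag bits used in the function, not from device documentation:
 *   bits  0..7   command opcode
 *   bit   6      open-drain bus mode
 *   bits  8..10  response type (1 = R1/R1b/R6/R7, 2 = R2, 3 = R3)
 *   bit   11     busy (R1b) response expected
 *   bits 12..13  command type (OMAP_MMC_CMDTYPE_*)
 *   bit   15     a data read follows the command
 */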
static void
goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(mmc_from_priv(host)),
			"Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	else
		cmdtype = OMAP_MMC_CMDTYPE_AC;

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
	GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
}
static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
				   struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		dma_data_dir = mmc_get_dma_dir(data);

		if (dma_data_dir == DMA_FROM_DEVICE) {
			/*
			 * We don't really have DMA, so we need
			 * to copy from our platform driver buffer
			 */
			sg_copy_from_buffer(data->sg, 1, host->virt_base,
					data->sg->length);
		}
		host->data->bytes_xfered += data->sg->length;
		dma_unmap_sg(mmc_dev(mmc_from_priv(host)), data->sg,
			     host->sg_len, dma_data_dir);
	}
	host->data = NULL;
	host->sg_len = 0;

	/*
	 * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(mmc_from_priv(host), data->mrq);
		return;
	}

	goldfish_mmc_start_command(host, data->stop);
}
static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
				     struct mmc_data *data)
{
	if (!host->dma_in_use) {
		goldfish_mmc_xfer_done(host, data);
		return;
	}
	if (host->dma_done)
		goldfish_mmc_xfer_done(host, data);
}
static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
				  struct mmc_command *cmd)
{
	host->cmd = NULL;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				GOLDFISH_MMC_READ(host, MMC_RESP_0);
			cmd->resp[2] =
				GOLDFISH_MMC_READ(host, MMC_RESP_1);
			cmd->resp[1] =
				GOLDFISH_MMC_READ(host, MMC_RESP_2);
			cmd->resp[0] =
				GOLDFISH_MMC_READ(host, MMC_RESP_3);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				GOLDFISH_MMC_READ(host, MMC_RESP_0);
		}
	}

	if (host->data == NULL || cmd->error) {
		host->mrq = NULL;
		mmc_request_done(mmc_from_priv(host), cmd->mrq);
	}
}
static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
{
	struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
	u16 status;
	int end_command = 0;
	int end_transfer = 0;
	int state_changed = 0;
	int cmd_timeout = 0;

	while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
		GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);

		if (status & MMC_STAT_END_OF_CMD)
			end_command = 1;

		if (status & MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & MMC_STAT_STATE_CHANGE)
			state_changed = 1;

		if (status & MMC_STAT_CMD_TIMEOUT) {
			end_command = 0;
			cmd_timeout = 1;
		}
	}

	if (cmd_timeout) {
		struct mmc_request *mrq = host->mrq;

		mrq->cmd->error = -ETIMEDOUT;
		host->mrq = NULL;
		mmc_request_done(mmc_from_priv(host), mrq);
	}

	if (end_command)
		goldfish_mmc_cmd_done(host, host->cmd);

	if (end_transfer) {
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	} else if (host->data != NULL) {
		/*
		 * WORKAROUND -- after porting this driver from 2.6 to 3.4,
		 * during device initialization, cases where host->data is
		 * non-null but end_transfer is false would occur. Doing
		 * nothing in such cases results in no further interrupts,
		 * and initialization failure.
		 * TODO -- find the real cause.
		 */
		host->dma_done = 1;
		goldfish_mmc_end_of_data(host, host->data);
	}

	if (state_changed) {
		u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);

		pr_info("%s: Card detect now %d\n", __func__,
			(state & MMC_STATE_INSERTED));
		mmc_detect_change(mmc_from_priv(host), 0);
	}

	if (!end_command && !end_transfer && !state_changed && !cmd_timeout) {
		status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
		dev_info(mmc_dev(mmc_from_priv(host)), "spurious irq 0x%04x\n",
			 status);
		if (status != 0) {
			GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
			GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
		}
	}

	return IRQ_HANDLED;
}
static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
				      struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int block_size;
	unsigned sg_len;
	enum dma_data_direction dma_data_dir;

	host->data = data;
	if (data == NULL) {
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
		host->dma_in_use = 0;
		return;
	}

	block_size = data->blksz;

	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);

	/*
	 * Cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	dma_data_dir = mmc_get_dma_dir(data);

	host->sg_len = dma_map_sg(mmc_dev(mmc_from_priv(host)), data->sg,
				  sg_len, dma_data_dir);
	host->dma_done = 0;
	host->dma_in_use = 1;

	if (dma_data_dir == DMA_TO_DEVICE) {
		/*
		 * We don't really have DMA, so we need to copy to our
		 * platform driver buffer
		 */
		sg_copy_to_buffer(data->sg, 1, host->virt_base,
				data->sg->length);
	}
}
static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	host->mrq = req;
	goldfish_mmc_prepare_data(host, req);
	goldfish_mmc_start_command(host, req->cmd);
}
static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	host->bus_mode = ios->bus_mode;
	host->hw_bus_mode = host->bus_mode;
}
static int goldfish_mmc_get_ro(struct mmc_host *mmc)
{
	uint32_t state;
	struct goldfish_mmc_host *host = mmc_priv(mmc);

	state = GOLDFISH_MMC_READ(host, MMC_STATE);
	return ((state & MMC_STATE_READ_ONLY) != 0);
}
static const struct mmc_host_ops goldfish_mmc_ops = {
	.request	= goldfish_mmc_request,
	.set_ios	= goldfish_mmc_set_ios,
	.get_ro		= goldfish_mmc_get_ro,
};
static int goldfish_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct goldfish_mmc_host *host = NULL;
	struct resource *res;
	int ret = 0;
	int irq;
	dma_addr_t buf_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
	if (mmc == NULL) {
		ret = -ENOMEM;
		goto err_alloc_host_failed;
	}

	host = mmc_priv(mmc);

	pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
	host->reg_base = ioremap(res->start, resource_size(res));
	if (host->reg_base == NULL) {
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					     &buf_addr, GFP_KERNEL);
	if (host->virt_base == NULL) {
		ret = -ENOMEM;
		goto dma_alloc_failed;
	}
	host->phys_base = buf_addr;

	host->id = pdev->id;
	host->irq = irq;

	mmc->ops = &goldfish_mmc_ops;
	mmc->f_min = 400000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->caps2 = MMC_CAP2_NO_SDIO;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */
	mmc->max_req_size = BUFFER_SIZE;
	mmc->max_seg_size = mmc->max_req_size;

	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n");
		goto err_request_irq_failed;
	}

	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
	if (ret)
		dev_warn(mmc_dev(mmc), "Unable to create sysfs attributes\n");

	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
			   MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
			   MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

	mmc_add_host(mmc);
	return 0;

err_request_irq_failed:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
			  host->phys_base);
dma_alloc_failed:
	iounmap(host->reg_base);
ioremap_failed:
	mmc_free_host(mmc);
err_alloc_host_failed:
	return ret;
}
static int goldfish_mmc_remove(struct platform_device *pdev)
{
	struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
	struct mmc_host *mmc = mmc_from_priv(host);

	BUG_ON(host == NULL);

	mmc_remove_host(mmc);
	free_irq(host->irq, host);
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
	iounmap(host->reg_base);
	mmc_free_host(mmc);

	return 0;
}
static struct platform_driver goldfish_mmc_driver = {
	.probe		= goldfish_mmc_probe,
	.remove		= goldfish_mmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(goldfish_mmc_driver);
MODULE_LICENSE("GPL v2");