/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_SHIFT 0
#define CLK_DIV_WIDTH 6
#define CLK_DIV_MASK 0x3f
#define CLK_DIV_MAX 63
#define CLK_SRC_SHIFT 6
#define CLK_SRC_WIDTH 2
#define CLK_SRC_MASK 0x3
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_XTAL_RATE 24000000
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_SRC_PLL_RATE 1000000000
#define CLK_PHASE_SHIFT 8
#define CLK_PHASE_MASK 0x3
#define CLK_PHASE_0 0
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_SHIFT 2
#define START_DESC_ADDR_MASK (~0x3)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_SHIFT 0
#define CFG_BUS_WIDTH_MASK 0x3
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_SHIFT 4
#define CFG_BLK_LEN_MASK 0xf
#define CFG_RESP_TIMEOUT_SHIFT 8
#define CFG_RESP_TIMEOUT_MASK 0xf
#define CFG_RC_CC_SHIFT 12
#define CFG_RC_CC_MASK 0xf
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK 0x3fff
#define IRQ_RXD_ERR_SHIFT 0
#define IRQ_RXD_ERR_MASK 0xff
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define MUX_CLK_NUM_PARENTS 2
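
/*
 * Driver-private controller state; allocated with mmc_alloc_host() and
 * retrieved via mmc_priv() in the mmc_host_ops callbacks.
 */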
struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	int irq;

	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
	unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;

	bool vqmmc_enabled;
};

/* One hardware command descriptor; fields mirror the SD_EMMC_CMD_* registers */
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};
#define CMD_CFG_LENGTH_SHIFT 0
#define CMD_CFG_LENGTH_MASK 0x1ff
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_SHIFT 12
#define CMD_CFG_TIMEOUT_MASK 0xf
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_SHIFT 24
#define CMD_CFG_CMD_INDEX_MASK 0x3f
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK (~0x3)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK (~0x1)
#define CMD_RESP_SRAM BIT(0)
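
/*
 * Change the bus clock rate: gate the clock via CFG_STOP_CLOCK, program
 * the divider through the common clock framework, then ungate it again
 * if a non-zero rate was requested.
 */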
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == mmc->actual_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (clk_rate == 0) {
		mmc->actual_clock = 0;
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret)
		dev_warn(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			 clk_rate, ret);
	else if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
		dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
			 clk_rate, clk_get_rate(host->cfg_div_clk), ret);
	else
		mmc->actual_clock = clk_rate;

	/* (re)start clock, if non-zero */
	if (!ret && clk_rate) {
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg &= ~CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	return ret;
}
/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	unsigned int mux_parent_count = 0;
	const char *clk_div_parents[1];
	unsigned int f_min = UINT_MAX;
	u32 clk_reg, cfg;

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		host->mux_parent[i] = devm_clk_get(host->dev, name);
		if (IS_ERR(host->mux_parent[i])) {
			ret = PTR_ERR(host->mux_parent[i]);
			if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
				dev_err(host->dev, "Missing clock %s\n", name);
			host->mux_parent[i] = NULL;
			return ret;
		}

		host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
		mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
		mux_parent_count++;
		if (host->mux_parent_rate[i] < f_min)
			f_min = host->mux_parent_rate[i];
	}
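
	/*
	 * Example: with the 24 MHz crystal as the slowest input and the
	 * maximum divider of 63, DIV_ROUND_UP(24000000, 63) = 380953 Hz,
	 * close to the ~400 kHz rate the MMC core uses for card
	 * identification.
	 */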
	/* calculate f_min based on input clocks, and max divider value */
	if (f_min != UINT_MAX)
		f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
	else
		f_min = 4000000; /* default min: 4 MHz */
	host->mmc->f_min = f_min;
	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = mux_parent_count;

	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = CLK_SRC_SHIFT;
	host->mux.mask = CLK_SRC_MASK;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);
	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = CLK_DIV_SHIFT;
	host->cfg_div.width = CLK_DIV_WIDTH;
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);
	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
	clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
	clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
	clk_reg &= ~CLK_ALWAYS_ON;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* Ensure clock starts in "auto" mode, not "always on" */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_CLK_ALWAYS_ON;
	cfg |= CFG_AUTO_CLK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_prepare_enable(host->cfg_div_clk);
	if (!ret)
		ret = meson_mmc_clk_set(host, f_min);

	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);

	return ret;
}
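
/*
 * Apply the MMC core's ios settings: power (via the optional vmmc/vqmmc
 * regulators), bus clock rate, and bus width.
 */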
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width;
	u32 val, orig;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}
		break;
	}

	meson_mmc_clk_set(host, ios->clock);
	/* Bus width */
	val = readl(host->regs + SD_EMMC_CFG);
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	orig = val;

	val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
	val |= bus_width << CFG_BUS_WIDTH_SHIFT;

	val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
	val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;

	val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
	val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;

	val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
	val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;

	writel(val, host->regs + SD_EMMC_CFG);

	if (val != orig)
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
}
static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != mrq);

	host->mrq = NULL;
	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);

	return 0;
}
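
/*
 * Build a single command descriptor on the stack and program its fields
 * directly into the SD_EMMC_CMD_* registers rather than submitting a DMA
 * descriptor chain; the write to SD_EMMC_CMD_ARG, done last, is what
 * kicks off the command.
 */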
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc, desc_tmp;
	u32 cfg;
	u8 blk_len, cmd_cfg_timeout;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	desc = &desc_tmp;
	memset(desc, 0, sizeof(struct sd_emmc_desc));

	desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
		CMD_CFG_CMD_INDEX_SHIFT;
	desc->cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
	desc->cmd_arg = cmd->arg;

	/* Response */
	if (cmd->flags & MMC_RSP_PRESENT) {
		desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
		if (cmd->flags & MMC_RSP_136)
			desc->cmd_cfg |= CMD_CFG_RESP_128;
		desc->cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			desc->cmd_cfg |= CMD_CFG_R1B;
	} else {
		desc->cmd_cfg |= CMD_CFG_NO_RESP;
	}

	/* data? */
	if (cmd->data) {
		desc->cmd_cfg |= CMD_CFG_DATA_IO;
		if (cmd->data->blocks > 1) {
			desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
			desc->cmd_cfg |=
				(cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
				CMD_CFG_LENGTH_SHIFT;

			/* check if block-size matches, if not update */
			cfg = readl(host->regs + SD_EMMC_CFG);
			blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
			blk_len >>= CFG_BLK_LEN_SHIFT;
			if (blk_len != ilog2(cmd->data->blksz)) {
				dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
					 __func__, blk_len,
					 ilog2(cmd->data->blksz));
				blk_len = ilog2(cmd->data->blksz);
				cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
				cfg |= blk_len << CFG_BLK_LEN_SHIFT;
				writel(cfg, host->regs + SD_EMMC_CFG);
			}
		} else {
			desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
			desc->cmd_cfg |=
				(cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
				CMD_CFG_LENGTH_SHIFT;
		}

		cmd->data->bytes_xfered = 0;
		xfer_bytes = cmd->data->blksz * cmd->data->blocks;
		if (cmd->data->flags & MMC_DATA_WRITE) {
			desc->cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
					  host->bounce_buf, xfer_bytes);
			cmd->data->bytes_xfered = xfer_bytes;
		} else {
			desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
		}

		if (xfer_bytes > 0) {
			desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
			desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
		} else {
			/* write data to data_addr */
			desc->cmd_cfg |= CMD_CFG_DATA_NUM;
		}

		cmd_cfg_timeout = 12;
	} else {
		desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
		cmd_cfg_timeout = 10;
	}
	desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
		CMD_CFG_TIMEOUT_SHIFT;

	host->cmd = cmd;

	/* Last descriptor */
	desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
}
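
/* Dispatch a request from the MMC core; CMD23 (mrq->sbc), if present, goes first */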
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	host->mrq = mrq;

	if (mrq->sbc)
		meson_mmc_start_cmd(mmc, mrq->sbc);
	else
		meson_mmc_start_cmd(mmc, mrq->cmd);
}
static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}

	return 0;
}
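
/*
 * Hard IRQ handler: decode and ack the interrupt status. Error bits
 * complete the request immediately; IRQ_END_OF_CHAIN defers completion
 * to the threaded handler, which may need to copy data out of the
 * bounce buffer.
 */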
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_request *mrq;
	struct mmc_command *cmd = host->cmd;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	mrq = cmd->mrq;

	if (WARN_ON(!mrq))
		return IRQ_NONE;

	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		ret = IRQ_WAKE_THREAD;
	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED) {
		meson_mmc_read_resp(host->mmc, cmd);
		meson_mmc_request_done(host->mmc, cmd->mrq);
	}

	spin_unlock(&host->lock);
	return ret;
}
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!mrq))
		return IRQ_NONE;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (data) {
		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_READ) {
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_from_buffer(data->sg, data->sg_len,
					    host->bounce_buf, xfer_bytes);
			data->bytes_xfered = xfer_bytes;
		}
	}

	meson_mmc_read_resp(host->mmc, cmd);
	if (!data || !data->stop || mrq->sbc)
		meson_mmc_request_done(host->mmc, mrq);
	else
		meson_mmc_start_cmd(host->mmc, data->stop);

	return ret;
}
/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}
static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
};
static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto free_host;

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);

	ret = devm_request_threaded_irq(&pdev->dev, host->irq,
					meson_mmc_irq, meson_mmc_irq_thread,
					IRQF_SHARED, DRIVER_NAME, host);
	if (ret)
		goto free_host;
	/* data bounce buffer */
	host->bounce_buf_size = SZ_512K;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto free_host;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

free_host:
	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);
	mmc_free_host(mmc);
	return ret;
}
static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	if (WARN_ON(!host))
		return 0;

	if (host->bounce_buf)
		dma_free_coherent(host->dev, host->bounce_buf_size,
				  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}
static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
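
/*
 * Illustrative device-tree node for this driver. The register offset,
 * interrupt, and clock specifiers below are hypothetical placeholders;
 * real values come from the SoC dtsi and its clock-controller binding.
 * The clock-names match the devm_clk_get() lookups above ("core" plus
 * "clkin0"/"clkin1" for the mux parents):
 *
 *	mmc@72000 {
 *		compatible = "amlogic,meson-gx-mmc";
 *		reg = <0x0 0x72000 0x0 0x2000>;
 *		interrupts = <...>;
 *		clocks = <&clkc CLKID_SD_EMMC>, <&xtal>, <&clkc CLKID_FCLK_DIV2>;
 *		clock-names = "core", "clkin0", "clkin1";
 *	};
 */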
static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);
MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");