// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/virtio.h>
#include <linux/workqueue.h>

#define USDHI6_SD_CMD           0x0000
#define USDHI6_SD_PORT_SEL      0x0004
#define USDHI6_SD_ARG           0x0008
#define USDHI6_SD_STOP          0x0010
#define USDHI6_SD_SECCNT        0x0014
#define USDHI6_SD_RSP10         0x0018
#define USDHI6_SD_RSP32         0x0020
#define USDHI6_SD_RSP54         0x0028
#define USDHI6_SD_RSP76         0x0030
#define USDHI6_SD_INFO1         0x0038
#define USDHI6_SD_INFO2         0x003c
#define USDHI6_SD_INFO1_MASK    0x0040
#define USDHI6_SD_INFO2_MASK    0x0044
#define USDHI6_SD_CLK_CTRL      0x0048
#define USDHI6_SD_SIZE          0x004c
#define USDHI6_SD_OPTION        0x0050
#define USDHI6_SD_ERR_STS1      0x0058
#define USDHI6_SD_ERR_STS2      0x005c
#define USDHI6_SD_BUF0          0x0060
#define USDHI6_SDIO_MODE        0x0068
#define USDHI6_SDIO_INFO1       0x006c
#define USDHI6_SDIO_INFO1_MASK  0x0070
#define USDHI6_CC_EXT_MODE      0x01b0
#define USDHI6_SOFT_RST         0x01c0
#define USDHI6_VERSION          0x01c4
#define USDHI6_HOST_MODE        0x01c8
#define USDHI6_SDIF_MODE        0x01cc

#define USDHI6_SD_CMD_APP               0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO     0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE     0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1       0x0400  /* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B      0x0500  /* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2       0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3       0x0700  /* Also R4 */
#define USDHI6_SD_CMD_DATA              0x0800
#define USDHI6_SD_CMD_READ              0x1000
#define USDHI6_SD_CMD_MULTI             0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF    0x4000

#define USDHI6_CC_EXT_MODE_SDRW         BIT(1)

#define USDHI6_SD_INFO1_RSP_END         BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END      BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT        BIT(3)
#define USDHI6_SD_INFO1_CARD_IN         BIT(4)
#define USDHI6_SD_INFO1_CD              BIT(5)
#define USDHI6_SD_INFO1_WP              BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT     BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN      BIT(9)

#define USDHI6_SD_INFO2_CMD_ERR         BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR         BIT(1)
#define USDHI6_SD_INFO2_END_ERR         BIT(2)
#define USDHI6_SD_INFO2_TOUT            BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR         BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR         BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT        BIT(6)
#define USDHI6_SD_INFO2_SDDAT0          BIT(7)
#define USDHI6_SD_INFO2_BRE             BIT(8)
#define USDHI6_SD_INFO2_BWE             BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN       BIT(13)
#define USDHI6_SD_INFO2_CBSY            BIT(14)
#define USDHI6_SD_INFO2_ILA             BIT(15)

#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

#define USDHI6_SD_INFO2_ERR     (USDHI6_SD_INFO2_CMD_ERR |      \
        USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |     \
        USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |        \
        USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |    \
        USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_INFO1_IRQ     (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
                                 USDHI6_SD_INFO1_CARD)

#define USDHI6_SD_INFO2_IRQ     (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
                                 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN       BIT(8)

#define USDHI6_SD_STOP_STP      BIT(0)
#define USDHI6_SD_STOP_SEC      BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ         BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52       BIT(14)
#define USDHI6_SDIO_INFO1_EXWT          BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13)

#define USDHI6_SOFT_RST_RESERVED        (BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET           BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT  4
#define USDHI6_SD_OPTION_TIMEOUT_MASK   (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1        BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT  8

#define USDHI6_SD_CLK_CTRL_DIV_MASK     0xff

#define USDHI6_SDIO_INFO1_IRQ   (USDHI6_SDIO_INFO1_IOIRQ | 3 | \
                                 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

#define USDHI6_MIN_DMA 64

enum usdhi6_wait_for {
        USDHI6_WAIT_FOR_REQUEST,
        USDHI6_WAIT_FOR_CMD,
        USDHI6_WAIT_FOR_MREAD,
        USDHI6_WAIT_FOR_MWRITE,
        USDHI6_WAIT_FOR_READ,
        USDHI6_WAIT_FOR_WRITE,
        USDHI6_WAIT_FOR_DATA_END,
        USDHI6_WAIT_FOR_STOP,
        USDHI6_WAIT_FOR_DMA,
};

struct usdhi6_page {
        struct page *page;
        void *mapped;           /* mapped page */
};

struct usdhi6_host {
        struct mmc_host *mmc;
        struct mmc_request *mrq;
        void __iomem *base;
        struct clk *clk;

        /* SG memory handling */

        /* Common for multiple and single block requests */
        struct usdhi6_page pg;  /* current page from an SG */
        void *blk_page;         /* either a mapped page, or the bounce buffer */
        size_t offset;          /* offset within a page, including sg->offset */

        /* Blocks, crossing a page boundary */
        size_t head_len;
        struct usdhi6_page head_pg;

        /* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */
        struct scatterlist bounce_sg;
        u8 bounce_buf[512];

        /* Multiple block requests only */
        struct scatterlist *sg; /* current SG segment */
        int page_idx;           /* page index within an SG segment */

        enum usdhi6_wait_for wait;
        u32 status_mask;
        u32 status2_mask;
        u32 sdio_mask;
        u32 io_error;
        u32 irq_status;
        unsigned long imclk;
        unsigned long rate;
        bool app_cmd;

        /* Timeout handling */
        struct delayed_work timeout_work;
        unsigned long timeout;

        /* DMA support */
        struct dma_chan *chan_rx;
        struct dma_chan *chan_tx;
        bool dma_active;

        /* Pin control */
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_uhs;
};

/* I/O primitives */

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
        iowrite32(data, host->base + reg);
        dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
                 host->base, reg, data);
}

static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
{
        iowrite16(data, host->base + reg);
        dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
                 host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
        u32 data = ioread32(host->base + reg);
        dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
                 host->base, reg, data);
        return data;
}

static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
{
        u16 data = ioread16(host->base + reg);
        dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
                 host->base, reg, data);
        return data;
}

static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
{
        host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
        host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
        usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
        usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
}

static void usdhi6_wait_for_resp(struct usdhi6_host *host)
{
        usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
                          USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
                          USDHI6_SD_INFO2_ERR);
}

static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
{
        usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
                          USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
                          (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
}

static void usdhi6_only_cd(struct usdhi6_host *host)
{
        /* Mask all except card hotplug */
        usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
}

static void usdhi6_mask_all(struct usdhi6_host *host)
{
        usdhi6_irq_enable(host, 0, 0);
}
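
/*
 * The INFO1/INFO2 MASK registers disable the interrupt sources whose bits
 * are set: usdhi6_irq_enable() writes the complement of the requested
 * events, so e.g. usdhi6_wait_for_resp() unmasks exactly RSP_END,
 * ACCESS_END, card hotplug and the error bits, and usdhi6_mask_all()
 * masks everything.
 */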

static int usdhi6_error_code(struct usdhi6_host *host)
{
        u32 err;

        usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);

        if (host->io_error &
            (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
                u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
                int opc = host->mrq ? host->mrq->cmd->opcode : -1;

                err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
                /* Response timeout is often normal, don't spam the log */
                if (host->wait == USDHI6_WAIT_FOR_CMD)
                        dev_dbg(mmc_dev(host->mmc),
                                "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
                                err, rsp54, host->wait, opc);
                else
                        dev_warn(mmc_dev(host->mmc),
                                 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
                                 err, rsp54, host->wait, opc);
                return -ETIMEDOUT;
        }

        err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
        if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
                dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
                         err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
        if (host->io_error & USDHI6_SD_INFO2_ILA)
                return -EILSEQ;

        return -EIO;
}

/* Scatter-Gather management */

/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks
 * have been observed with an SDIO WiFi card (b43 driver).
 */
static void usdhi6_blk_bounce(struct usdhi6_host *host,
                              struct scatterlist *sg)
{
        struct mmc_data *data = host->mrq->data;
        size_t blk_head = host->head_len;

        dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
                __func__, host->mrq->cmd->opcode, data->sg_len,
                data->blksz, data->blocks, sg->offset);

        host->head_pg.page      = host->pg.page;
        host->head_pg.mapped    = host->pg.mapped;
        host->pg.page           = nth_page(host->pg.page, 1);
        host->pg.mapped         = kmap(host->pg.page);

        host->blk_page = host->bounce_buf;
        host->offset = 0;

        if (data->flags & MMC_DATA_READ)
                return;

        memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
               blk_head);
        memcpy(host->bounce_buf + blk_head, host->pg.mapped,
               data->blksz - blk_head);
}
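
/*
 * Illustrative example: with PAGE_SIZE = 4096 and blksz = 512, a block
 * starting at page offset 3840 leaves head_len = 256 bytes in the current
 * page and 256 bytes in the next one. For a write the two memcpy() calls
 * above stitch the halves together in bounce_buf, which is then fed to the
 * FIFO as one contiguous block; for a read the bounce buffer is split back
 * into the two pages in usdhi6_sg_unmap().
 */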

/* Only called for multiple block IO */
static void usdhi6_sg_prep(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq->data;

        usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);

        host->sg = data->sg;
        /* TODO: if we always map, this is redundant */
        host->offset = host->sg->offset;
}

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
        size_t head = PAGE_SIZE - sg->offset;
        size_t blk_head = head % data->blksz;

        WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
        if (WARN(sg_dma_len(sg) % data->blksz,
                 "SG size %u isn't a multiple of block size %u\n",
                 sg_dma_len(sg), data->blksz))
                return NULL;

        host->pg.page = sg_page(sg);
        host->pg.mapped = kmap(host->pg.page);
        host->offset = sg->offset;

        /*
         * Block size must be a power of 2 for multi-block transfers,
         * therefore blk_head is equal for all pages in this SG
         */
        host->head_len = blk_head;

        if (head < data->blksz)
                /*
                 * The first block in the SG crosses a page boundary.
                 * Max blksz = 512, so blocks can only span 2 pages
                 */
                usdhi6_blk_bounce(host, sg);
        else
                host->blk_page = host->pg.mapped;

        dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
                host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
                sg->offset, host->mrq->cmd->opcode, host->mrq);

        return host->blk_page + host->offset;
}
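
/*
 * Why blk_head is the same for every page of the segment: PAGE_SIZE and the
 * (multi-block) block size are both powers of 2, so PAGE_SIZE % blksz == 0
 * and every page boundary falls at the same offset within a block. E.g.
 * with blksz = 512 and sg->offset = 3840 each cross-page block is split
 * 256 / 256 between two pages.
 */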

/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
        struct mmc_data *data = host->mrq->data;
        struct page *page = host->head_pg.page;

        if (page) {
                /* Previous block was cross-page boundary */
                struct scatterlist *sg = data->sg_len > 1 ?
                        host->sg : data->sg;
                size_t blk_head = host->head_len;

                if (!data->error && data->flags & MMC_DATA_READ) {
                        memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
                               host->bounce_buf, blk_head);
                        memcpy(host->pg.mapped, host->bounce_buf + blk_head,
                               data->blksz - blk_head);
                }

                flush_dcache_page(page);
                kunmap(page);

                host->head_pg.page = NULL;

                if (!force && sg_dma_len(sg) + sg->offset >
                    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
                        /* More blocks in this SG, don't unmap the next page */
                        return;
        }

        page = host->pg.page;
        if (!page)
                return;

        flush_dcache_page(page);
        kunmap(page);

        host->pg.page = NULL;
}

/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;
        size_t done, total;

        /* New offset: set at the end of the previous block */
        if (host->head_pg.page) {
                /* Finished a cross-page block, jump to the new page */
                host->page_idx++;
                host->offset = data->blksz - host->head_len;
                host->blk_page = host->pg.mapped;
                usdhi6_sg_unmap(host, false);
        } else {
                host->offset += data->blksz;
                /* The completed block didn't cross a page boundary */
                if (host->offset == PAGE_SIZE) {
                        /* If required, we'll map the page below */
                        host->offset = 0;
                        host->page_idx++;
                }
        }

        /*
         * Now host->blk_page + host->offset point at the end of our last block
         * and host->page_idx is the index of the page, in which our new block
         * is located, if any
         */

        done = (host->page_idx << PAGE_SHIFT) + host->offset;
        total = host->sg->offset + sg_dma_len(host->sg);

        dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
                done, total, host->offset);

        if (done < total && host->offset) {
                /* More blocks in this page */
                if (host->offset + data->blksz > PAGE_SIZE)
                        /* We've reached a block that spans 2 pages */
                        usdhi6_blk_bounce(host, host->sg);

                return;
        }

        /* Finished current page or an SG segment */
        usdhi6_sg_unmap(host, false);

        if (done == total) {
                /*
                 * End of an SG segment or the complete SG: jump to the next
                 * segment, we'll map it later in usdhi6_blk_read() or
                 * usdhi6_blk_write()
                 */
                struct scatterlist *next = sg_next(host->sg);

                host->page_idx = 0;

                if (!next)
                        host->wait = USDHI6_WAIT_FOR_DATA_END;
                host->sg = next;

                if (WARN(next && sg_dma_len(next) % data->blksz,
                         "SG size %u isn't a multiple of block size %u\n",
                         sg_dma_len(next), data->blksz))
                        data->error = -EINVAL;

                return;
        }

        /* We cannot get here after crossing a page border */

        /* Next page in the same SG */
        host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
        host->pg.mapped = kmap(host->pg.page);
        host->blk_page = host->pg.mapped;

        dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
                host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
                host->mrq->cmd->opcode, host->mrq);
}
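
/*
 * Walk-through (illustrative): a 3-block read, blksz = 512, one segment
 * with sg->offset = 3072 and length 1536, i.e. total = 4608. After block 1
 * offset = 3584 and done = 3584: more blocks in this page. After block 2
 * offset wraps to 0, page_idx becomes 1 and done = 4096: the old page is
 * unmapped and page 1 is mapped. After block 3 done = 4608 == total: the
 * segment is finished and host->sg advances via sg_next().
 */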

/* DMA handling */

static void usdhi6_dma_release(struct usdhi6_host *host)
{
        host->dma_active = false;
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
}

static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;

        if (!host->dma_active)
                return;

        usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
        host->dma_active = false;

        if (data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev, data->sg,
                             data->sg_len, DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev, data->sg,
                             data->sg_len, DMA_TO_DEVICE);
}

static void usdhi6_dma_complete(void *arg)
{
        struct usdhi6_host *host = arg;
        struct mmc_request *mrq = host->mrq;

        if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
                 dev_name(mmc_dev(host->mmc)), mrq))
                return;

        dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
                mrq->cmd->opcode);

        usdhi6_dma_stop_unmap(host);
        usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
}

static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
                            enum dma_transfer_direction dir)
{
        struct mmc_data *data = host->mrq->data;
        struct scatterlist *sg = data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        dma_cookie_t cookie = -EINVAL;
        enum dma_data_direction data_dir;
        int ret;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                data_dir = DMA_TO_DEVICE;
                break;
        case DMA_DEV_TO_MEM:
                data_dir = DMA_FROM_DEVICE;
                break;
        default:
                return -EINVAL;
        }

        ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
        if (ret > 0) {
                host->dma_active = true;
                desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = usdhi6_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
        }

        dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
                __func__, data->sg_len, ret, cookie, desc);

        if (cookie < 0) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = cookie;
                usdhi6_dma_release(host);
                dev_warn(mmc_dev(host->mmc),
                         "DMA failed: %d, falling back to PIO\n", ret);
        }

        return cookie;
}
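
/*
 * dmaengine_submit() returns a cookie >= DMA_MIN_COOKIE (i.e. >= 1) on
 * success, so usdhi6_rq_start() treats any smaller value returned by
 * usdhi6_dma_start() as "no DMA" and falls back to PIO for the request.
 */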

static int usdhi6_dma_start(struct usdhi6_host *host)
{
        if (!host->chan_rx || !host->chan_tx)
                return -ENODEV;

        if (host->mrq->data->flags & MMC_DATA_READ)
                return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);

        return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
}

static void usdhi6_dma_kill(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;

        dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
                __func__, data->sg_len, data->blocks, data->blksz);
        /* Abort DMA */
        if (data->flags & MMC_DATA_READ)
                dmaengine_terminate_all(host->chan_rx);
        else
                dmaengine_terminate_all(host->chan_tx);
}

static void usdhi6_dma_check_error(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;

        dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
                __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));

        if (host->io_error) {
                data->error = usdhi6_error_code(host);
                data->bytes_xfered = 0;
                usdhi6_dma_kill(host);
                usdhi6_dma_release(host);
                dev_warn(mmc_dev(host->mmc),
                         "DMA failed: %d, falling back to PIO\n", data->error);
                return;
        }

        /*
         * The datasheet tells us to check a response from the card, whereas
         * responses only come after the command phase, not after the data
         * phase. Let's check anyway.
         */
        if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
                dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}

static void usdhi6_dma_kick(struct usdhi6_host *host)
{
        if (host->mrq->data->flags & MMC_DATA_READ)
                dma_async_issue_pending(host->chan_rx);
        else
                dma_async_issue_pending(host->chan_tx);
}

static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
        struct dma_slave_config cfg = {
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        int ret;

        host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
        dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
                host->chan_tx);

        if (IS_ERR(host->chan_tx)) {
                host->chan_tx = NULL;
                return;
        }

        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = start + USDHI6_SD_BUF0;
        cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
        cfg.src_addr = 0;
        ret = dmaengine_slave_config(host->chan_tx, &cfg);
        if (ret < 0)
                goto e_release_tx;

        host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
        dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
                host->chan_rx);

        if (IS_ERR(host->chan_rx)) {
                host->chan_rx = NULL;
                goto e_release_tx;
        }

        cfg.direction = DMA_DEV_TO_MEM;
        cfg.src_addr = cfg.dst_addr;
        cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
        cfg.dst_addr = 0;
        ret = dmaengine_slave_config(host->chan_rx, &cfg);
        if (ret < 0)
                goto e_release_rx;

        return;

e_release_rx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
e_release_tx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}

/* API helpers */

static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
{
        unsigned long rate = ios->clock;
        u32 val;
        unsigned int i;

        for (i = 1000; i; i--) {
                if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
                        break;
                usleep_range(10, 100);
        }

        if (!i) {
                dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
                return;
        }

        val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;

        if (rate) {
                unsigned long new_rate;

                if (host->imclk <= rate) {
                        if (ios->timing != MMC_TIMING_UHS_DDR50) {
                                /* Cannot have 1-to-1 clock in DDR mode */
                                new_rate = host->imclk;
                                val |= 0xff;
                        } else {
                                new_rate = host->imclk / 2;
                        }
                } else {
                        unsigned long div =
                                roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
                        val |= div >> 2;
                        new_rate = host->imclk / div;
                }

                if (host->rate == new_rate)
                        return;

                host->rate = new_rate;

                dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
                        rate, (val & 0xff) << 2, new_rate);
        }

        /*
         * if old or new rate is equal to input rate, have to switch the clock
         * off before changing and on after
         */
        if (host->imclk == rate || host->imclk == host->rate || !rate)
                usdhi6_write(host, USDHI6_SD_CLK_CTRL,
                             val & ~USDHI6_SD_CLK_CTRL_SCLKEN);

        if (!rate) {
                host->rate = 0;
                return;
        }

        usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);

        if (host->imclk == rate || host->imclk == host->rate ||
            !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
                usdhi6_write(host, USDHI6_SD_CLK_CTRL,
                             val | USDHI6_SD_CLK_CTRL_SCLKEN);
}
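
/*
 * Divider example (illustrative): with imclk = 156 MHz and a requested
 * 25 MHz, DIV_ROUND_UP() yields 7, which roundup_pow_of_two() turns into 8,
 * so val |= 8 >> 2 and the card clock becomes 156 / 8 = 19.5 MHz: the
 * nearest supported rate not above the request.
 */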

static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
{
        struct mmc_host *mmc = host->mmc;

        if (!IS_ERR(mmc->supply.vmmc))
                /* Errors ignored... */
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                                      ios->power_mode ? ios->vdd : 0);
}

static int usdhi6_reset(struct usdhi6_host *host)
{
        int i;

        usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
        cpu_relax();
        usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
        for (i = 1000; i; i--)
                if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
                        break;

        return i ? 0 : -ETIMEDOUT;
}

static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct usdhi6_host *host = mmc_priv(mmc);
        u32 option, mode;
        int ret;

        dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
                ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                usdhi6_set_power(host, ios);
                usdhi6_only_cd(host);
                break;
        case MMC_POWER_UP:
                /*
                 * We only also touch USDHI6_SD_OPTION from .request(), which
                 * cannot race with MMC_POWER_UP
                 */
                ret = usdhi6_reset(host);
                if (ret < 0) {
                        dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
                } else {
                        usdhi6_set_power(host, ios);
                        usdhi6_only_cd(host);
                }
                break;
        case MMC_POWER_ON:
                option = usdhi6_read(host, USDHI6_SD_OPTION);
                /*
                 * The eMMC standard only allows 4 or 8 bits in the DDR mode,
                 * the same probably holds for SD cards. We check here anyway,
                 * since the datasheet explicitly requires 4 bits for DDR.
                 */
                if (ios->bus_width == MMC_BUS_WIDTH_1) {
                        if (ios->timing == MMC_TIMING_UHS_DDR50)
                                dev_err(mmc_dev(mmc),
                                        "4 bits are required for DDR\n");
                        option |= USDHI6_SD_OPTION_WIDTH_1;
                        mode = 0;
                } else {
                        option &= ~USDHI6_SD_OPTION_WIDTH_1;
                        mode = ios->timing == MMC_TIMING_UHS_DDR50;
                }
                usdhi6_write(host, USDHI6_SD_OPTION, option);
                usdhi6_write(host, USDHI6_SDIF_MODE, mode);
                break;
        }

        if (host->rate != ios->clock)
                usdhi6_clk_set(host, ios);
}

/* This is data timeout. Response timeout is fixed to 640 clock cycles */
static void usdhi6_timeout_set(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        u32 val;
        unsigned long ticks;

        if (!mrq->data)
                ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
        else
                ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
                        mrq->data->timeout_clks;

        if (!ticks || ticks > 1 << 27)
                /* Max timeout */
                val = 14;
        else if (ticks < 1 << 13)
                /* Min timeout */
                val = 0;
        else
                val = order_base_2(ticks) - 13;

        dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
                mrq->data ? "data" : "cmd", ticks, host->rate);

        /* Timeout Counter mask: 0xf0 */
        usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
                     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
}
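
/*
 * Worked example (illustrative): at rate = 25 MHz, timeout_ns = 100 ms and
 * timeout_clks = 0 give ticks = 25 * 100000 = 2500000. order_base_2() of
 * that is 22, so val = 9 and the controller counts 2^(13 + 9) = 2^22
 * clocks, about 168 ms: the smallest power-of-two timeout covering the
 * request.
 */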

static void usdhi6_request_done(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq->data;

        if (WARN(host->pg.page || host->head_pg.page,
                 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
                 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
                 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
                 data ? host->offset : 0, data ? data->blocks : 0,
                 data ? data->blksz : 0, data ? data->sg_len : 0))
                usdhi6_sg_unmap(host, true);

        if (mrq->cmd->error ||
            (data && data->error) ||
            (mrq->stop && mrq->stop->error))
                dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
                        __func__, mrq->cmd->opcode, data ? data->blocks : 0,
                        data ? data->blksz : 0,
                        mrq->cmd->error,
                        data ? data->error : 1,
                        mrq->stop ? mrq->stop->error : 1);

        /* Disable DMA */
        usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
        host->wait = USDHI6_WAIT_FOR_REQUEST;
        host->mrq = NULL;

        mmc_request_done(host->mmc, mrq);
}

static int usdhi6_cmd_flags(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_command *cmd = mrq->cmd;
        u16 opc = cmd->opcode;

        if (host->app_cmd) {
                host->app_cmd = false;
                opc |= USDHI6_SD_CMD_APP;
        }

        if (mrq->data) {
                opc |= USDHI6_SD_CMD_DATA;

                if (mrq->data->flags & MMC_DATA_READ)
                        opc |= USDHI6_SD_CMD_READ;

                if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
                    (cmd->opcode == SD_IO_RW_EXTENDED &&
                     mrq->data->blocks > 1)) {
                        opc |= USDHI6_SD_CMD_MULTI;
                        if (!mrq->stop)
                                opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
                }

                switch (mmc_resp_type(cmd)) {
                case MMC_RSP_NONE:
                        opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
                        break;
                case MMC_RSP_R1:
                        opc |= USDHI6_SD_CMD_MODE_RSP_R1;
                        break;
                case MMC_RSP_R1B:
                        opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
                        break;
                case MMC_RSP_R2:
                        opc |= USDHI6_SD_CMD_MODE_RSP_R2;
                        break;
                case MMC_RSP_R3:
                        opc |= USDHI6_SD_CMD_MODE_RSP_R3;
                        break;
                default:
                        dev_warn(mmc_dev(host->mmc),
                                 "Unknown response type %d\n",
                                 mmc_resp_type(cmd));
                        return -EINVAL;
                }
        }

        return opc;
}
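
/*
 * Note that commands without data leave the response-mode field at
 * USDHI6_SD_CMD_MODE_RSP_AUTO (0), which presumably lets the controller
 * derive the response format from the opcode on its own; the explicit
 * MODE_RSP_* values above are only programmed for data commands.
 */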

static int usdhi6_rq_start(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_command *cmd = mrq->cmd;
        struct mmc_data *data = mrq->data;
        int opc = usdhi6_cmd_flags(host);
        int i;

        if (opc < 0)
                return opc;

        for (i = 1000; i; i--) {
                if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
                        break;
                usleep_range(10, 100);
        }

        if (!i) {
                dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
                return -EAGAIN;
        }

        if (data) {
                bool use_dma;
                int ret = 0;

                host->page_idx = 0;

                if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
                        switch (data->blksz) {
                        case 512:
                                break;
                        case 32:
                        case 64:
                        case 128:
                        case 256:
                                if (mrq->stop)
                                        ret = -EINVAL;
                                break;
                        default:
                                ret = -EINVAL;
                        }
                } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
                           data->blksz != 512) {
                        ret = -EINVAL;
                }

                if (ret < 0) {
                        dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
                                 __func__, data->blocks, data->blksz);
                        return -EINVAL;
                }

                if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
                    (cmd->opcode == SD_IO_RW_EXTENDED &&
                     data->blocks > 1))
                        usdhi6_sg_prep(host);

                usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);

                if ((data->blksz >= USDHI6_MIN_DMA ||
                     data->blocks > 1) &&
                    (data->blksz % 4 ||
                     data->sg->offset % 4))
                        dev_dbg(mmc_dev(host->mmc),
                                "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
                                data->blksz, data->blocks, data->sg->offset);

                /* Enable DMA for USDHI6_MIN_DMA bytes or more */
                use_dma = data->blksz >= USDHI6_MIN_DMA &&
                        !(data->blksz % 4) &&
                        usdhi6_dma_start(host) >= DMA_MIN_COOKIE;

                if (use_dma)
                        usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);

                dev_dbg(mmc_dev(host->mmc),
                        "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
                        __func__, cmd->opcode, data->blocks, data->blksz,
                        data->sg_len, use_dma ? "DMA" : "PIO",
                        data->flags & MMC_DATA_READ ? "read" : "write",
                        data->sg->offset, mrq->stop ? " + stop" : "");
        } else {
                dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
                        __func__, cmd->opcode);
        }

        /* We have to get a command completion interrupt with DMA too */
        usdhi6_wait_for_resp(host);

        host->wait = USDHI6_WAIT_FOR_CMD;
        schedule_delayed_work(&host->timeout_work, host->timeout);

        /* SEC bit is required to enable block counting by the core */
        usdhi6_write(host, USDHI6_SD_STOP,
                     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
        usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);

        /* Kick command execution */
        usdhi6_write(host, USDHI6_SD_CMD, opc);

        return 0;
}

static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct usdhi6_host *host = mmc_priv(mmc);
        int ret;

        cancel_delayed_work_sync(&host->timeout_work);

        host->mrq = mrq;
        host->sg = NULL;

        usdhi6_timeout_set(host);
        ret = usdhi6_rq_start(host);
        if (ret < 0) {
                mrq->cmd->error = ret;
                usdhi6_request_done(host);
        }
}

static int usdhi6_get_cd(struct mmc_host *mmc)
{
        struct usdhi6_host *host = mmc_priv(mmc);
        /* Read is atomic, no need to lock */
        u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;

        /*
         *      level   status.CD       CD_ACTIVE_HIGH  card present
         *      1       0               0               0
         *      1       0               1               1
         *      0       1               0               1
         *      0       1               1               0
         */
        return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
}

static int usdhi6_get_ro(struct mmc_host *mmc)
{
        struct usdhi6_host *host = mmc_priv(mmc);
        /* No locking as above */
        u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;

        /*
         *      level   status.WP       RO_ACTIVE_HIGH  card read-only
         *      1       0               0               0
         *      1       0               1               1
         *      0       1               0               1
         *      0       1               1               0
         */
        return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
}

static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
        struct usdhi6_host *host = mmc_priv(mmc);

        dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");

        if (enable) {
                host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
                usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
                usdhi6_write(host, USDHI6_SDIO_MODE, 1);
        } else {
                usdhi6_write(host, USDHI6_SDIO_MODE, 0);
                usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
                host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
        }
}

static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
{
        if (IS_ERR(host->pins_uhs))
                return 0;

        switch (voltage) {
        case MMC_SIGNAL_VOLTAGE_180:
        case MMC_SIGNAL_VOLTAGE_120:
                return pinctrl_select_state(host->pinctrl,
                                            host->pins_uhs);

        default:
                return pinctrl_select_default_state(mmc_dev(host->mmc));
        }
}

static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
        int ret;

        ret = mmc_regulator_set_vqmmc(mmc, ios);
        if (ret < 0)
                return ret;

        ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
        if (ret)
                dev_warn_once(mmc_dev(mmc),
                              "Failed to set pinstate err=%d\n", ret);
        return ret;
}

static const struct mmc_host_ops usdhi6_ops = {
        .request = usdhi6_request,
        .set_ios = usdhi6_set_ios,
        .get_cd = usdhi6_get_cd,
        .get_ro = usdhi6_get_ro,
        .enable_sdio_irq = usdhi6_enable_sdio_irq,
        .start_signal_voltage_switch = usdhi6_sig_volt_switch,
};

/* State machine handlers */

static void usdhi6_resp_cmd12(struct usdhi6_host *host)
{
        struct mmc_command *cmd = host->mrq->stop;
        cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
}

static void usdhi6_resp_read(struct usdhi6_host *host)
{
        struct mmc_command *cmd = host->mrq->cmd;
        u32 *rsp = cmd->resp, tmp = 0;
        int i;

        /*
         * RSP10        39-8
         * RSP32        71-40
         * RSP54        103-72
         * RSP76        127-104
         * R2-type response:
         * resp[0] = r[127..96]
         * resp[1] = r[95..64]
         * resp[2] = r[63..32]
         * resp[3] = r[31..0]
         * Other responses:
         * resp[0] = r[39..8]
         */

        if (mmc_resp_type(cmd) == MMC_RSP_NONE)
                return;

        if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
                dev_err(mmc_dev(host->mmc),
                        "CMD%d: response expected but is missing!\n", cmd->opcode);
                return;
        }

        if (mmc_resp_type(cmd) & MMC_RSP_136)
                for (i = 0; i < 4; i++) {
                        if (i)
                                rsp[3 - i] = tmp >> 24;
                        tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
                        rsp[3 - i] |= tmp << 8;
                }
        else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                /* Read RSP54 to avoid conflict with auto CMD12 */
                rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
        else
                rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);

        dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
}

static int usdhi6_blk_read(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p;
        int i, rest;

        if (host->io_error) {
                data->error = usdhi6_error_code(host);
                goto error;
        }

        if (host->pg.page) {
                p = host->blk_page + host->offset;
        } else {
                p = usdhi6_sg_map(host);
                if (!p) {
                        data->error = -ENOMEM;
                        goto error;
                }
        }

        for (i = 0; i < data->blksz / 4; i++, p++)
                *p = usdhi6_read(host, USDHI6_SD_BUF0);

        rest = data->blksz % 4;
        for (i = 0; i < (rest + 1) / 2; i++) {
                u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
                ((u8 *)p)[2 * i] = ((u8 *)&d)[0];
                if (rest > 1 && !i)
                        ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
        }

        return 0;

error:
        dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
        host->wait = USDHI6_WAIT_FOR_REQUEST;
        return data->error;
}

static int usdhi6_blk_write(struct usdhi6_host *host)
{
        struct mmc_data *data = host->mrq->data;
        u32 *p;
        int i, rest;

        if (host->io_error) {
                data->error = usdhi6_error_code(host);
                goto error;
        }

        if (host->pg.page) {
                p = host->blk_page + host->offset;
        } else {
                p = usdhi6_sg_map(host);
                if (!p) {
                        data->error = -ENOMEM;
                        goto error;
                }
        }

        for (i = 0; i < data->blksz / 4; i++, p++)
                usdhi6_write(host, USDHI6_SD_BUF0, *p);

        rest = data->blksz % 4;
        for (i = 0; i < (rest + 1) / 2; i++) {
                u16 d;
                ((u8 *)&d)[0] = ((u8 *)p)[2 * i];
                if (rest > 1 && !i)
                        ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
                else
                        ((u8 *)&d)[1] = 0;
                usdhi6_write16(host, USDHI6_SD_BUF0, d);
        }

        return 0;

error:
        dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
        host->wait = USDHI6_WAIT_FOR_REQUEST;
        return data->error;
}
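
/*
 * The tail loops in usdhi6_blk_read() and usdhi6_blk_write() above handle
 * block sizes that aren't a multiple of 4: the remaining 1-3 bytes go
 * through one or two 16-bit FIFO accesses, and only the valid bytes of the
 * last u16 are copied (the write side pads the unused byte with 0).
 */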

static int usdhi6_stop_cmd(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;

        switch (mrq->cmd->opcode) {
        case MMC_READ_MULTIPLE_BLOCK:
        case MMC_WRITE_MULTIPLE_BLOCK:
                if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
                        host->wait = USDHI6_WAIT_FOR_STOP;
                        return 0;
                }
                /* fall through - Unsupported STOP command. */
        default:
                dev_err(mmc_dev(host->mmc),
                        "unsupported stop CMD%d for CMD%d\n",
                        mrq->stop->opcode, mrq->cmd->opcode);
                mrq->stop->error = -EOPNOTSUPP;
        }

        return -EOPNOTSUPP;
}

static bool usdhi6_end_cmd(struct usdhi6_host *host)
{
        struct mmc_request *mrq = host->mrq;
        struct mmc_command *cmd = mrq->cmd;

        if (host->io_error) {
                cmd->error = usdhi6_error_code(host);
                return false;
        }

        usdhi6_resp_read(host);

        if (!mrq->data)
                return false;

        if (host->dma_active) {
                usdhi6_dma_kick(host);
                if (!mrq->stop)
                        host->wait = USDHI6_WAIT_FOR_DMA;
                else if (usdhi6_stop_cmd(host) < 0)
                        return false;
        } else if (mrq->data->flags & MMC_DATA_READ) {
                if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                    (cmd->opcode == SD_IO_RW_EXTENDED &&
                     mrq->data->blocks > 1))
                        host->wait = USDHI6_WAIT_FOR_MREAD;
                else
                        host->wait = USDHI6_WAIT_FOR_READ;
        } else {
                if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
                    (cmd->opcode == SD_IO_RW_EXTENDED &&
                     mrq->data->blocks > 1))
                        host->wait = USDHI6_WAIT_FOR_MWRITE;
                else
                        host->wait = USDHI6_WAIT_FOR_WRITE;
        }

        return true;
}

static bool usdhi6_read_block(struct usdhi6_host *host)
{
        /* ACCESS_END IRQ is already unmasked */
        int ret = usdhi6_blk_read(host);

        /*
         * Have to force unmapping both pages: the single block could have been
         * cross-page, in which case for single-block IO host->page_idx == 0.
         * So, if we don't force, the second page won't be unmapped.
         */
        usdhi6_sg_unmap(host, true);

        if (ret < 0)
                return false;

        host->wait = USDHI6_WAIT_FOR_DATA_END;
        return true;
}

static bool usdhi6_mread_block(struct usdhi6_host *host)
{
        int ret = usdhi6_blk_read(host);

        if (ret < 0)
                return false;

        usdhi6_sg_advance(host);

        return !host->mrq->data->error &&
                (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

static bool usdhi6_write_block(struct usdhi6_host *host)
{
        int ret = usdhi6_blk_write(host);

        /* See comment in usdhi6_read_block() */
        usdhi6_sg_unmap(host, true);

        if (ret < 0)
                return false;

        host->wait = USDHI6_WAIT_FOR_DATA_END;
        return true;
}

static bool usdhi6_mwrite_block(struct usdhi6_host *host)
{
        int ret = usdhi6_blk_write(host);

        if (ret < 0)
                return false;

        usdhi6_sg_advance(host);

        return !host->mrq->data->error &&
                (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}
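
/*
 * Return-value convention of the four block handlers above, as consumed by
 * usdhi6_sd_bh(): true means more IO is expected, so the timeout is
 * re-armed and, in PIO mode, BRE/BWE are unmasked again; false means the
 * transfer is complete or failed and the request proceeds to completion.
 */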

/* Interrupt & timeout handlers */

static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
{
        struct usdhi6_host *host = dev_id;
        struct mmc_request *mrq;
        struct mmc_command *cmd;
        struct mmc_data *data;
        bool io_wait = false;

        cancel_delayed_work_sync(&host->timeout_work);

        mrq = host->mrq;
        if (!mrq)
                return IRQ_HANDLED;

        cmd = mrq->cmd;
        data = mrq->data;

        switch (host->wait) {
        case USDHI6_WAIT_FOR_REQUEST:
                /* We're too late, the timeout has already kicked in */
                return IRQ_HANDLED;
        case USDHI6_WAIT_FOR_CMD:
                /* Wait for data? */
                io_wait = usdhi6_end_cmd(host);
                break;
        case USDHI6_WAIT_FOR_MREAD:
                /* Wait for more data? */
                io_wait = usdhi6_mread_block(host);
                break;
        case USDHI6_WAIT_FOR_READ:
                /* Wait for data end? */
                io_wait = usdhi6_read_block(host);
                break;
        case USDHI6_WAIT_FOR_MWRITE:
                /* Wait for more data to write? */
                io_wait = usdhi6_mwrite_block(host);
                break;
        case USDHI6_WAIT_FOR_WRITE:
                /* Wait for data end? */
                io_wait = usdhi6_write_block(host);
                break;
        case USDHI6_WAIT_FOR_DMA:
                usdhi6_dma_check_error(host);
                break;
        case USDHI6_WAIT_FOR_STOP:
                usdhi6_write(host, USDHI6_SD_STOP, 0);
                if (host->io_error) {
                        int ret = usdhi6_error_code(host);
                        if (mrq->stop)
                                mrq->stop->error = ret;
                        else
                                mrq->data->error = ret;
                        dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
                        break;
                }
                usdhi6_resp_cmd12(host);
                mrq->stop->error = 0;
                break;
        case USDHI6_WAIT_FOR_DATA_END:
                if (host->io_error) {
                        mrq->data->error = usdhi6_error_code(host);
                        dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
                                 mrq->data->error);
                }
                break;
        default:
                cmd->error = -EFAULT;
                dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
                usdhi6_request_done(host);
                return IRQ_HANDLED;
        }

        if (io_wait) {
                schedule_delayed_work(&host->timeout_work, host->timeout);
                /* Wait for more data or ACCESS_END */
                if (!host->dma_active)
                        usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
                return IRQ_HANDLED;
        }

        if (!cmd->error) {
                if (data) {
                        if (!data->error) {
                                if (host->wait != USDHI6_WAIT_FOR_STOP &&
                                    host->mrq->stop &&
                                    !host->mrq->stop->error &&
                                    !usdhi6_stop_cmd(host)) {
                                        /* Sending STOP */
                                        usdhi6_wait_for_resp(host);

                                        schedule_delayed_work(&host->timeout_work,
                                                              host->timeout);

                                        return IRQ_HANDLED;
                                }

                                data->bytes_xfered = data->blocks * data->blksz;
                        } else {
                                /* Data error: might need to unmap the last page */
                                dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
                                         __func__, data->error);
                                usdhi6_sg_unmap(host, true);
                        }
                } else if (cmd->opcode == MMC_APP_CMD) {
                        host->app_cmd = true;
                }
        }

        usdhi6_request_done(host);

        return IRQ_HANDLED;
}

static irqreturn_t usdhi6_sd(int irq, void *dev_id)
{
        struct usdhi6_host *host = dev_id;
        u16 status, status2, error;

        status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
                ~USDHI6_SD_INFO1_CARD;
        status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;

        usdhi6_only_cd(host);

        dev_dbg(mmc_dev(host->mmc),
                "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);

        if (!status && !status2)
                return IRQ_NONE;

        error = status2 & USDHI6_SD_INFO2_ERR;

        /* Ack / clear interrupts */
        if (USDHI6_SD_INFO1_IRQ & status)
                usdhi6_write(host, USDHI6_SD_INFO1,
                             0xffff & ~(USDHI6_SD_INFO1_IRQ & status));

        if (USDHI6_SD_INFO2_IRQ & status2) {
                if (error)
                        /* In error cases BWE and BRE aren't cleared automatically */
                        status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;

                usdhi6_write(host, USDHI6_SD_INFO2,
                             0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
        }

        host->io_error = error;
        host->irq_status = status;

        if (error) {
                /* Don't pollute the log with unsupported command timeouts */
                if (host->wait != USDHI6_WAIT_FOR_CMD ||
                    error != USDHI6_SD_INFO2_RSP_TOUT)
                        dev_warn(mmc_dev(host->mmc),
                                 "%s(): INFO2 error bits 0x%08x\n",
                                 __func__, error);
                else
                        dev_dbg(mmc_dev(host->mmc),
                                "%s(): INFO2 error bits 0x%08x\n",
                                __func__, error);
        }

        return IRQ_WAKE_THREAD;
}
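
/*
 * usdhi6_sd() above runs in hard-IRQ context: it only latches and acks the
 * status bits, then returns IRQ_WAKE_THREAD so that the sleeping work
 * (page mapping, FIFO copies, mmc_request_done()) runs in usdhi6_sd_bh(),
 * the threaded half registered with devm_request_threaded_irq() in probe.
 */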

static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
{
        struct usdhi6_host *host = dev_id;
        u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;

        dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);

        if (!status)
                return IRQ_NONE;

        usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);

        mmc_signal_sdio_irq(host->mmc);

        return IRQ_HANDLED;
}

static irqreturn_t usdhi6_cd(int irq, void *dev_id)
{
        struct usdhi6_host *host = dev_id;
        struct mmc_host *mmc = host->mmc;
        u16 status;

        /* We're only interested in hotplug events here */
        status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
                USDHI6_SD_INFO1_CARD;

        if (!status)
                return IRQ_NONE;

        /* Ack */
        usdhi6_write(host, USDHI6_SD_INFO1, ~status);

        if (!work_pending(&mmc->detect.work) &&
            (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
              !mmc->card) ||
             ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
              mmc->card)))
                mmc_detect_change(mmc, msecs_to_jiffies(100));

        return IRQ_HANDLED;
}

/*
 * Actually this should not be needed, if the built-in timeout works reliably
 * in both PIO cases and DMA never fails. But if DMA does fail, a timeout
 * handler might be the only way to catch the error.
 */
static void usdhi6_timeout_work(struct work_struct *work)
{
        struct delayed_work *d = to_delayed_work(work);
        struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq ? mrq->data : NULL;
        struct scatterlist *sg;

        dev_warn(mmc_dev(host->mmc),
                 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
                 host->dma_active ? "DMA" : "PIO",
                 host->wait, mrq ? mrq->cmd->opcode : -1,
                 usdhi6_read(host, USDHI6_SD_INFO1),
                 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);

        if (host->dma_active) {
                usdhi6_dma_kill(host);
                usdhi6_dma_stop_unmap(host);
        }

        switch (host->wait) {
        default:
                dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
                /* fall through - mrq can be NULL, but is impossible. */
        case USDHI6_WAIT_FOR_CMD:
                usdhi6_error_code(host);
                if (mrq)
                        mrq->cmd->error = -ETIMEDOUT;
                break;
        case USDHI6_WAIT_FOR_STOP:
                usdhi6_error_code(host);
                mrq->stop->error = -ETIMEDOUT;
                break;
        case USDHI6_WAIT_FOR_DMA:
        case USDHI6_WAIT_FOR_MREAD:
        case USDHI6_WAIT_FOR_MWRITE:
        case USDHI6_WAIT_FOR_READ:
        case USDHI6_WAIT_FOR_WRITE:
                sg = host->sg ?: data->sg;
                dev_dbg(mmc_dev(host->mmc),
                        "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
                        data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
                        host->offset, data->blocks, data->blksz, data->sg_len,
                        sg_dma_len(sg), sg->offset);
                usdhi6_sg_unmap(host, true);
                /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */
        case USDHI6_WAIT_FOR_DATA_END:
                usdhi6_error_code(host);
                data->error = -ETIMEDOUT;
        }

        if (mrq)
                usdhi6_request_done(host);
}

/* Probe / release */

static const struct of_device_id usdhi6_of_match[] = {
        {.compatible = "renesas,usdhi6rol0"},
        {},
};
MODULE_DEVICE_TABLE(of, usdhi6_of_match);

static int usdhi6_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct mmc_host *mmc;
        struct usdhi6_host *host;
        struct resource *res;
        int irq_cd, irq_sd, irq_sdio;
        u32 version;
        int ret;

        if (!dev->of_node)
                return -ENODEV;

        irq_cd = platform_get_irq_byname(pdev, "card detect");
        irq_sd = platform_get_irq_byname(pdev, "data");
        irq_sdio = platform_get_irq_byname(pdev, "SDIO");
        if (irq_sd < 0 || irq_sdio < 0)
                return -ENODEV;

        mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
        if (!mmc)
                return -ENOMEM;

        ret = mmc_regulator_get_supply(mmc);
        if (ret)
                goto e_free_mmc;

        ret = mmc_of_parse(mmc);
        if (ret < 0)
                goto e_free_mmc;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->wait = USDHI6_WAIT_FOR_REQUEST;
        host->timeout = msecs_to_jiffies(4000);

        host->pinctrl = devm_pinctrl_get(&pdev->dev);
        if (IS_ERR(host->pinctrl)) {
                ret = PTR_ERR(host->pinctrl);
                goto e_free_mmc;
        }

        host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto e_free_mmc;
        }

        host->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                goto e_free_mmc;
        }

        host->imclk = clk_get_rate(host->clk);

        ret = clk_prepare_enable(host->clk);
        if (ret < 0)
                goto e_free_mmc;

        version = usdhi6_read(host, USDHI6_VERSION);
        if ((version & 0xfff) != 0xa0d) {
                /* Don't return the stale 0 from clk_prepare_enable() here */
                ret = -EPERM;
                dev_err(dev, "Version not recognized %x\n", version);
                goto e_clk_off;
        }

        dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
                 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);

        usdhi6_mask_all(host);

        if (irq_cd >= 0) {
                ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
                                       dev_name(dev), host);
                if (ret < 0)
                        goto e_clk_off;
        } else {
                mmc->caps |= MMC_CAP_NEEDS_POLL;
        }

        ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
                               dev_name(dev), host);
        if (ret < 0)
                goto e_clk_off;

        ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
                               dev_name(dev), host);
        if (ret < 0)
                goto e_clk_off;

        INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);

        usdhi6_dma_request(host, res->start);

        mmc->ops = &usdhi6_ops;
        mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
                     MMC_CAP_SDIO_IRQ;
        /* Set .max_segs to some random number. Feel free to adjust. */
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        /*
         * Setting .max_seg_size to 1 page would simplify our page-mapping code,
         * but OTOH, having large segments makes DMA more efficient. We could
         * check, whether we managed to get DMA and fall back to 1 page
         * segments, but if we do manage to obtain DMA and then it fails at
         * run-time and we fall back to PIO, we will continue getting large
         * segments. So, we wouldn't be able to get rid of the code anyway.
         */
        mmc->max_seg_size = mmc->max_req_size;
        if (!mmc->f_max)
                mmc->f_max = host->imclk;
        mmc->f_min = host->imclk / 512;

        platform_set_drvdata(pdev, host);

        ret = mmc_add_host(mmc);
        if (ret < 0)
                goto e_clk_off;

        return 0;

e_clk_off:
        clk_disable_unprepare(host->clk);
e_free_mmc:
        mmc_free_host(mmc);

        return ret;
}

static int usdhi6_remove(struct platform_device *pdev)
{
        struct usdhi6_host *host = platform_get_drvdata(pdev);

        mmc_remove_host(host->mmc);

        usdhi6_mask_all(host);
        cancel_delayed_work_sync(&host->timeout_work);
        usdhi6_dma_release(host);
        clk_disable_unprepare(host->clk);
        mmc_free_host(host->mmc);

        return 0;
}

static struct platform_driver usdhi6_driver = {
        .probe          = usdhi6_probe,
        .remove         = usdhi6_remove,
        .driver         = {
                .name   = "usdhi6rol0",
                .of_match_table = usdhi6_of_match,
        },
};

module_platform_driver(usdhi6_driver);

MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:usdhi6rol0");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");