/*
 * DMA support use of SYS DMAC with SDHI SD/SDIO controller
 *
 * Copyright (C) 2016-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
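
/*
 * Requests whose first scatterlist entry is shorter than the minimum above
 * are not worth a DMA setup: the start_dma_{rx,tx}() paths below set
 * host->force_pio and return instead of programming a descriptor.
 */
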
static const struct renesas_sdhi_of_data of_default_cfg = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

static const struct renesas_sdhi_of_data of_rz_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
		      TMIO_MMC_HAVE_CBSY,
	.tmio_ocr_mask = MMC_VDD_32_33,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
	{
		.clk_rate = 156000000,
		.tap = 0x00000703,
	},
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
		      TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23,
	.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dma_rx_offset = 0x2000,
	.scc_offset = 0x0300,
	.taps = rcar_gen2_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
		      TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY |
		      TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23,
	.bus_shift = 2,
	.scc_offset = 0x1000,
	.taps = rcar_gen3_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
};

static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
	{ .compatible = "renesas,sdhi-shmobile" },
	{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
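
/*
 * DMA is only used when both a TX and an RX channel have been acquired;
 * the controller-level enable/disable itself is delegated to the optional
 * dma_priv.enable() hook when one is provided.
 */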
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
					     bool enable)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	if (!host->chan_tx || !host->chan_rx)
		return;

	if (priv->dma_priv.enable)
		priv->dma_priv.enable(host, enable);
}

static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
{
	renesas_sdhi_sys_dmac_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	renesas_sdhi_sys_dmac_enable_dma(host, true);
}

static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	complete(&priv->dma_priv.dma_dataend);
}
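
/*
 * dmaengine completion callback: unmap the scatterlist of the finished
 * transfer, then (with host->lock dropped) wait for the DATAEND interrupt,
 * signalled through renesas_sdhi_sys_dmac_dataend_dma() above, before
 * letting the tmio core complete the data request.
 */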
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;
	struct renesas_sdhi *priv = host_to_priv(host);

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	spin_unlock_irq(&host->lock);

	wait_for_completion(&priv->dma_priv.dma_dataend);

	spin_lock_irq(&host->lock);
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
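
/*
 * Set up a device-to-memory transfer. Every scatterlist entry must be
 * aligned to the controller's DMA alignment and a multiple of it in length;
 * a single unaligned entry is routed through the bounce buffer, anything
 * else falls back to PIO and releases both channels.
 */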
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
					    struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			renesas_sdhi_sys_dmac_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			renesas_sdhi_sys_dmac_start_dma_tx(host);
	}
}
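
/*
 * Tasklet that actually kicks the transfer: it enables the DATAEND
 * interrupt and issues the previously submitted descriptor on the channel
 * matching the data direction. The tmio core schedules it through
 * host->dma_issue once the command phase has finished.
 */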
static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
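
/*
 * Acquire the TX and RX slave channels and configure them: both point at
 * the SD data port register (the RX address is additionally shifted by
 * dma_rx_offset), and the access width defaults to 16 bits when the
 * of_data does not specify one. A single page, allocated with GFP_DMA,
 * serves as the bounce buffer for unaligned scatterlist entries.
 */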
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
					      struct tmio_mmc_data *pdata)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->pdev->dev.of_node &&
	    (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start +
			(CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		init_completion(&priv->dma_priv.dma_dataend);
		tasklet_init(&host->dma_issue,
			     renesas_sdhi_sys_dmac_issue_tasklet_fn,
			     (unsigned long)host);
	}

	renesas_sdhi_sys_dmac_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}

static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
	.start = renesas_sdhi_sys_dmac_start_dma,
	.enable = renesas_sdhi_sys_dmac_enable_dma,
	.request = renesas_sdhi_sys_dmac_request_dma,
	.release = renesas_sdhi_sys_dmac_release_dma,
	.abort = renesas_sdhi_sys_dmac_abort_dma,
	.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};

/*
 * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
 * implementation. Currently empty as all supported ES versions use
 * the internal DMAC.
 */
static const struct soc_device_attribute gen3_soc_whitelist[] = {
	{ /* sentinel */ }
};

static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
	if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
	    !soc_device_match(gen3_soc_whitelist))
		return -ENODEV;

	return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
}

static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
};

static struct platform_driver renesas_sys_dmac_sdhi_driver = {
	.driver		= {
		.name	= "sh_mobile_sdhi",
		.pm	= &renesas_sdhi_sys_dmac_dev_pm_ops,
		.of_match_table = renesas_sdhi_sys_dmac_of_match,
	},
	.probe		= renesas_sdhi_sys_dmac_probe,
	.remove		= renesas_sdhi_remove,
};

module_platform_driver(renesas_sys_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");