/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

/* Transfers shorter than this many bytes are done in PIO mode instead */
#define TMIO_MMC_MIN_DMA_LEN 8

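/*
 * Switch DMA on or off for this host. Both channels must already have
 * been acquired; the actual switch is delegated to the platform glue
 * through host->dma->enable, if one is provided.
 */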
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}

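/*
 * Abort a running DMA transfer: disable DMA, terminate any pending
 * descriptors on both channels, then re-enable DMA for later requests.
 */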
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

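/*
 * Prepare and submit a device-to-memory (read) transfer for the current
 * request, falling back to PIO if the scatterlist cannot be handled by
 * the DMA engine.
 */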
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

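/*
 * Prepare and submit a memory-to-device (write) transfer. Unlike the read
 * path, an unaligned single-element scatterlist has to be copied into the
 * bounce buffer before the descriptor is prepared.
 */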
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

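/* Dispatch the data phase of a request to the Rx or Tx DMA helper. */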
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

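/*
 * Tasklet that kicks the DMA engine, presumably scheduled once the command
 * phase of the request has completed: pick the channel matching the data
 * direction, re-enable the DATAEND interrupt and issue the pending
 * descriptor.
 */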
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

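/*
 * DMA completion tasklet: unmap the scatterlist of the finished transfer
 * and complete the data request via tmio_mmc_do_data_irq().
 */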
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

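/*
 * Acquire the Tx and Rx slave channels and configure them with the data
 * port address of this controller. DMA is used either for both directions
 * or not at all; any failure releases whatever was acquired and leaves the
 * host in PIO mode.
 */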
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
		(!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

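/* Release both DMA channels and the bounce page on driver teardown. */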
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}