/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Check each sg element against the DMA engine's alignment constraints */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Check each sg element against the DMA engine's alignment constraints */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}