/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
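
/*
 * Gate the controller-side DMA engine on or off. This is a no-op unless
 * both channels were successfully requested; controllers that expose a
 * DMA control register (TMIO_MMC_HAVE_CTL_DMA_REG) also get CTL_DMA_ENABLE
 * written.
 */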
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (!host->chan_tx || !host->chan_rx)
                return;

        if (host->pdata->flags & TMIO_MMC_HAVE_CTL_DMA_REG)
                sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
}
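
/*
 * Abort any in-flight descriptors on both channels, with the controller's
 * DMA enable cycled off and back on around the terminate calls.
 */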
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        tmio_mmc_enable_dma(host, false);

        if (host->chan_rx)
                dmaengine_terminate_all(host->chan_rx);
        if (host->chan_tx)
                dmaengine_terminate_all(host->chan_tx);

        tmio_mmc_enable_dma(host, true);
}
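
/*
 * Prepare and submit a device-to-memory (read) descriptor for the current
 * request. Falls back to PIO when the scatterlist does not meet the DMA
 * engine's alignment constraints or the transfer is shorter than
 * TMIO_MMC_MIN_DMA_LEN; a single misaligned element is redirected through
 * the bounce buffer instead.
 */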
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->sg_len);
}
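
/*
 * Prepare and submit a memory-to-device (write) descriptor. Same fallback
 * rules as the Rx path, except that a single misaligned element is copied
 * into the bounce buffer before the transfer is set up.
 */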
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}
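
/* Dispatch to the Rx or Tx setup path according to the data direction. */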
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}
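
/*
 * Issue tasklet: re-enable the DATAEND interrupt and start the pending
 * DMA transfer on the channel that matches the current data direction.
 */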
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}
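
/*
 * DMA completion tasklet: unmap the scatterlist for the finished direction
 * and hand the data stage back to the core via tmio_mmc_do_data_irq().
 */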
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}
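
/*
 * Request and configure the Tx and Rx dmaengine channels, either via DT or
 * through the platform-supplied filter and channel data, allocate the
 * bounce page and set up the issue/completion tasklets. On failure the
 * channels are released again and the host stays on PIO.
 */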
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!pdata->dma || (!host->pdev->dev.of_node &&
                (!pdata->dma->chan_priv_tx || !pdata->dma->chan_priv_rx)))
                return;

        if (!host->chan_tx && !host->chan_rx) {
                struct resource *res = platform_get_resource(host->pdev,
                                                             IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = {};
                dma_cap_mask_t mask;
                int ret;

                if (!res)
                        return;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_slave_channel_compat(mask,
                                        pdata->dma->filter, pdata->dma->chan_priv_tx,
                                        &host->pdev->dev, "tx");
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                if (pdata->dma->chan_priv_tx)
                        cfg.slave_id = pdata->dma->slave_id_tx;
                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift);
                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                ret = dmaengine_slave_config(host->chan_tx, &cfg);
                if (ret < 0)
                        goto ecfgtx;

                host->chan_rx = dma_request_slave_channel_compat(mask,
                                        pdata->dma->filter, pdata->dma->chan_priv_rx,
                                        &host->pdev->dev, "rx");
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ecfgtx;

                if (pdata->dma->chan_priv_rx)
                        cfg.slave_id = pdata->dma->slave_id_rx;
                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = cfg.dst_addr + pdata->dma->dma_rx_offset;
                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                ret = dmaengine_slave_config(host->chan_rx, &cfg);
                if (ret < 0)
                        goto ecfgrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}
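
/* Undo tmio_mmc_request_dma(): drop both channels and free the bounce page. */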
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}