/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

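/*
 * Overview: this driver exposes the generic dmaengine slave/cyclic API
 * on top of the legacy i.MX DMA primitives (imx_dma_*) provided by
 * <mach/dma-v1.h>.  Each dmaengine channel wraps one hardware channel
 * requested from that layer, and every channel carries exactly one
 * reusable descriptor.
 */
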
struct imxdma_channel {
        struct imxdma_engine *imxdma;
        unsigned int channel;           /* index within imxdma->channel[] */
        unsigned int imxdma_channel;    /* hardware channel from dma-v1 */

        enum dma_slave_buswidth word_size;
        dma_addr_t per_address;
        u32 watermark_level;
        struct dma_chan chan;
        spinlock_t lock;
        struct dma_async_tx_descriptor desc;
        dma_cookie_t last_completed;
        enum dma_status status;
        int dma_request;
        struct scatterlist *sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
        struct device *dev;
        struct device_dma_parameters dma_parms;
        struct dma_device dma_device;
        struct imxdma_channel channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct imxdma_channel, chan);
}

static void imxdma_handle(struct imxdma_channel *imxdmac)
{
        if (imxdmac->desc.callback)
                imxdmac->desc.callback(imxdmac->desc.callback_param);
        imxdmac->last_completed = imxdmac->desc.cookie;
}

static void imxdma_irq_handler(int channel, void *data)
{
        struct imxdma_channel *imxdmac = data;

        imxdmac->status = DMA_SUCCESS;
        imxdma_handle(imxdmac);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
        struct imxdma_channel *imxdmac = data;

        imxdmac->status = DMA_ERROR;
        imxdma_handle(imxdmac);
}

static void imxdma_progression(int channel, void *data,
                struct scatterlist *sg)
{
        struct imxdma_channel *imxdmac = data;

        imxdmac->status = DMA_SUCCESS;
        imxdma_handle(imxdmac);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct dma_slave_config *dmaengine_cfg = (void *)arg;
        int ret;
        unsigned int mode = 0;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                imxdmac->status = DMA_ERROR;
                imx_dma_disable(imxdmac->imxdma_channel);
                return 0;
        case DMA_SLAVE_CONFIG:
                if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        imxdmac->per_address = dmaengine_cfg->dst_addr;
                        imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
                        imxdmac->word_size = dmaengine_cfg->dst_addr_width;
                }

                switch (imxdmac->word_size) {
                case DMA_SLAVE_BUSWIDTH_1_BYTE:
                        mode = IMX_DMA_MEMSIZE_8;
                        break;
                case DMA_SLAVE_BUSWIDTH_2_BYTES:
                        mode = IMX_DMA_MEMSIZE_16;
                        break;
                default:
                case DMA_SLAVE_BUSWIDTH_4_BYTES:
                        mode = IMX_DMA_MEMSIZE_32;
                        break;
                }
                ret = imx_dma_config_channel(imxdmac->imxdma_channel,
                                mode | IMX_DMA_TYPE_FIFO,
                                IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
                                imxdmac->dma_request, 1);

                if (ret)
                        return ret;

                imx_dma_config_burstlen(imxdmac->imxdma_channel,
                                imxdmac->watermark_level);

                return 0;
        default:
                return -ENOSYS;
        }

        return -EINVAL;
}

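/*
 * Illustrative slave configuration from a client driver's point of
 * view (the FIFO address and burst size below are made-up values, not
 * taken from this file).  At this stage of the dmaengine API the
 * client fills a struct dma_slave_config and passes it through the
 * device_control hook implemented above:
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_FROM_DEVICE,
 *              .src_addr       = per_fifo_phys,   // hypothetical FIFO
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *              .src_maxburst   = 16,
 *      };
 *
 *      chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *                                   (unsigned long)&cfg);
 */
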
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        dma_cookie_t last_used;
        enum dma_status ret;

        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
        dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);

        return ret;
}

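/*
 * Cookies are strictly positive: if incrementing the signed counter
 * below overflows into negative territory it is wrapped back to 1, so
 * a valid cookie can never be confused with the negative error values
 * that dma_submit_error() checks for.
 */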
static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
        dma_cookie_t cookie = imxdma->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        imxdma->chan.cookie = cookie;
        imxdma->desc.cookie = cookie;

        return cookie;
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_irq(&imxdmac->lock);

        cookie = imxdma_assign_cookie(imxdmac);

        imx_dma_enable(imxdmac->imxdma_channel);

        spin_unlock_irq(&imxdmac->lock);

        return cookie;
}

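/*
 * Note that tx_submit() enables the hardware channel right away: the
 * transfer starts at submit time rather than at issue_pending() time,
 * which is why imxdma_issue_pending() further down is empty.
 */
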
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imx_dma_data *data = chan->private;

        imxdmac->dma_request = data->dma_request;

        dma_async_tx_descriptor_init(&imxdmac->desc, chan);
        imxdmac->desc.tx_submit = imxdma_tx_submit;
        /* txd.flags will be overwritten in prep funcs */
        imxdmac->desc.flags = DMA_CTRL_ACK;

        imxdmac->status = DMA_SUCCESS;

        return 0;
}

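/*
 * chan->private must point at a struct imx_dma_data before the channel
 * is allocated; it is dereferenced unconditionally above to pick the
 * hardware request line.
 */
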
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

        imx_dma_disable(imxdmac->imxdma_channel);

        /* kfree(NULL) is a no-op, no need to check first */
        kfree(imxdmac->sg_list);
        imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct scatterlist *sg;
        int i, ret, dma_length = 0;
        unsigned int dmamode;

        if (imxdmac->status == DMA_IN_PROGRESS)
                return NULL;

        imxdmac->status = DMA_IN_PROGRESS;

        for_each_sg(sgl, sg, sg_len, i) {
                dma_length += sg->length;
        }

        if (direction == DMA_FROM_DEVICE)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;

        /* only the first entry is checked for alignment */
        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                if (sgl->length & 3 || sgl->dma_address & 3)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                if (sgl->length & 1 || sgl->dma_address & 1)
                        return NULL;
                break;
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                break;
        default:
                return NULL;
        }

        ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
                 dma_length, imxdmac->per_address, dmamode);
        if (ret)
                return NULL;

        return &imxdmac->desc;
}

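/*
 * Sketch of the client-side sequence this prep hook expects (buffer
 * name, length and callback are illustrative, not from this file):
 *
 *      struct dma_async_tx_descriptor *desc;
 *      struct scatterlist sg;
 *
 *      sg_init_one(&sg, buf, len);
 *      dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE);
 *      desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *                                                DMA_FROM_DEVICE, 0);
 *      desc->callback = done_fn;
 *      desc->callback_param = ctx;
 *      desc->tx_submit(desc);          // starts the transfer at once
 */
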
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_data_direction direction)
{
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        int i, ret;
        unsigned int periods = buf_len / period_len;
        unsigned int dmamode;

        dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
                        __func__, imxdmac->channel, buf_len, period_len);

        if (imxdmac->status == DMA_IN_PROGRESS)
                return NULL;
        imxdmac->status = DMA_IN_PROGRESS;

        ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
                        imxdma_progression);
        if (ret) {
                dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
                return NULL;
        }

        kfree(imxdmac->sg_list);

        imxdmac->sg_list = kcalloc(periods + 1,
                        sizeof(struct scatterlist), GFP_KERNEL);
        if (!imxdmac->sg_list)
                return NULL;

        sg_init_table(imxdmac->sg_list, periods);

        for (i = 0; i < periods; i++) {
                imxdmac->sg_list[i].page_link = 0;
                imxdmac->sg_list[i].offset = 0;
                imxdmac->sg_list[i].dma_address = dma_addr;
                imxdmac->sg_list[i].length = period_len;
                dma_addr += period_len;
        }

        /*
         * close the loop: the sentinel entry chains back to the head
         * (bit 0 marks a chain pointer, bit 1, the end-of-list marker,
         * is cleared)
         */
        imxdmac->sg_list[periods].offset = 0;
        imxdmac->sg_list[periods].length = 0;
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

        if (direction == DMA_FROM_DEVICE)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;

        ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list,
                        periods, IMX_DMA_LENGTH_LOOP, imxdmac->per_address,
                        dmamode);
        if (ret)
                return NULL;

        return &imxdmac->desc;
}

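/*
 * Cyclic transfers back audio-style ring buffers: the hardware loops
 * over the circular scatterlist indefinitely, and the progression
 * handler installed above is expected to invoke the client callback as
 * each period completes.  A hypothetical client setup:
 *
 *      desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *                      4 * 4096, 4096, DMA_TO_DEVICE);
 *      desc->callback = period_elapsed;        // illustrative name
 *      desc->tx_submit(desc);
 */
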
static void imxdma_issue_pending(struct dma_chan *chan)
{
        /*
         * Nothing to do. We only have a single descriptor
         */
}

static int __init imxdma_probe(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma;
        int ret, i;

        imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
        if (!imxdma)
                return -ENOMEM;

        INIT_LIST_HEAD(&imxdma->dma_device.channels);

        dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);

        /* Initialize channel parameters */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];

                imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
                                DMA_PRIO_MEDIUM);
                /* check the field just assigned, not the still-zeroed index */
                if ((int)imxdmac->imxdma_channel < 0) {
                        ret = -ENODEV;
                        goto err_init;
                }

                imx_dma_setup_handlers(imxdmac->imxdma_channel,
                       imxdma_irq_handler, imxdma_err_handler, imxdmac);

                imxdmac->imxdma = imxdma;
                spin_lock_init(&imxdmac->lock);

                imxdmac->chan.device = &imxdma->dma_device;
                imxdmac->channel = i;

                /* Add the channel to the DMAC list */
                list_add_tail(&imxdmac->chan.device_node,
                                &imxdma->dma_device.channels);
        }

        imxdma->dev = &pdev->dev;
        imxdma->dma_device.dev = &pdev->dev;

        imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
        imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
        imxdma->dma_device.device_tx_status = imxdma_tx_status;
        imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
        imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
        imxdma->dma_device.device_control = imxdma_control;
        imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

        platform_set_drvdata(pdev, imxdma);

        imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
        dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); /* 2^24 - 1 */

        ret = dma_async_device_register(&imxdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
                goto err_init;
        }

        return 0;

err_init:
        while (--i >= 0) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];
                imx_dma_free(imxdmac->imxdma_channel);
        }

        kfree(imxdma);
        return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
        struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
        int i;

        dma_async_device_unregister(&imxdma->dma_device);

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct imxdma_channel *imxdmac = &imxdma->channel[i];

                imx_dma_free(imxdmac->imxdma_channel);
        }

        kfree(imxdma);

        return 0;
}

static struct platform_driver imxdma_driver = {
        .driver = {
                .name   = "imx-dma",
        },
        .remove = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
        return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");