drivers/dma/fsldma.c (linux-2.6/next.git)

/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following mode:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
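
/*
 * dma_start - kick off the channel. With external pause control, the
 * byte-count register is cleared and EMP_EN is set so the DREQ# pin can
 * gate the transfer; with external start, EMS_EN makes the channel wait
 * for the start pin, otherwise setting CS starts the transfer at once.
 */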
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}
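
/* Mark the descriptor as the tail of its chain by setting FSL_DMA_EOL
 * in its hardware next-link address.
 */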
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor's physical address and enable the
	 * End-of-segment interrupt for the last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA
 * transfers data from the source address (SA) with a loop size of 4,
 * it reads data from SA, SA + 1, SA + 2, SA + 3, then loops back to
 * SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA) with a loop size of 4,
 * it writes data to TA, TA + 1, TA + 2, TA + 3, then loops back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal
 * DREQ#. The pause control size is how many bytes are allowed to
 * transfer before pausing the channel, after which a new assertion of
 * DREQ# resumes channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If the external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately; the DMA channel waits for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
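
/*
 * fsl_dma_tx_submit - assign a cookie and append the descriptor chain to
 * the channel's ld_queue. Cookies grow monotonically and restart at 1 on
 * signed overflow, since negative values are reserved for error states
 * such as the -EBUSY placeholder set by the prep functions.
 */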
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function creates a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
					struct dma_client *client)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
				"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}
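
/*
 * fsl_dma_prep_interrupt - prepare a DMA_INTERRUPT "null" transfer: a
 * single descriptor that moves no data and is terminated with EOL.
 * Completion is reported through the programming-error interrupt (see
 * the FSL_DMA_SR_PE handling in fsl_dma_chan_do_interrupt() below).
 */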
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}
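
/*
 * fsl_dma_prep_memcpy - prepare a memory-to-memory copy, splitting any
 * transfer longer than FSL_DMA_BCR_MAX_CNT into a chain of link
 * descriptors. Only the final descriptor carries the caller's flags and
 * the EOL marker. Note that descriptors already allocated are not freed
 * if a later allocation in the chain fails.
 */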
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: every
 * descriptor whose cookie is complete is removed from the queue, freed
 * back to the pool, and its callback (if any) is run.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not yet
	 * been transferred, we need to start them.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
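
/*
 * Per-channel interrupt handler: acknowledge the status register, then
 * decode transfer-error, programming-error, end-of-segment, end-of-chain
 * and end-of-link events before scheduling the cleanup tasklet.
 */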
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0: this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie and continue
			 * with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If a link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
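
/*
 * Controller-level interrupt handler: the general status register packs
 * one status byte per channel, with channel 0 in the most significant
 * byte, so (32 - ffs(gsr)) / 8 maps the lowest set bit to its channel
 * number; the interrupt is then delegated to that channel's handler.
 */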
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;

	fsl_chan_ld_cleanup(fsl_chan);
}
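
/*
 * fsl_dma_chan_probe - set up one channel from its device-tree node: map
 * its registers, derive the channel id from the register offset (the
 * expression below assumes channels at 0x100 + id * 0x80), hook up the
 * feature-specific callbacks and register with the dmaengine device.
 */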
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &new_fsl_chan->common.dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also support loop sizes */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				compatible, new_fsl_chan->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}
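
/*
 * of_fsl_dma_probe - probe one DMA controller node: map the controller
 * registers, fill in the dmaengine callbacks, request the shared
 * controller interrupt, then instantiate a channel for each compatible
 * child node before registering the device with the dmaengine core.
 */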
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}

static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");