/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */
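/*
 * Illustrative client usage (not part of this driver): a rough sketch of how
 * a consumer of the generic dmaengine API would request a memcpy transfer
 * serviced by this engine. The variable names below are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */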
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error status bits */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
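/*
 * Note: the MPC_DMA_TSIZE_* values are the hardware encodings used in the
 * ssize/dsize fields of the transfer control descriptor below; the suffix
 * gives the access width in bytes (1, 2, 4, 16 or 32 byte transfers).
 */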
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */

	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */

	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */

	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */

	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */

	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */

	u32 reserve0[48];	/* Reserved */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};
/* Transfer Control Descriptor (TCD) */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	u32 dlast_sga;		/* Last destination address adjustment/scatter
				 * gather address
				 */

	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	dma_cookie_t			completed_cookie;

	/* Lock for this structure */
	spinlock_t			lock;
};
struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	uint				error_status;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->start = 0;
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
	out_8(&mdma->regs->dmassrt, cid);
}
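/*
 * Note on the chaining above: each descriptor's dlast_sga is pointed at the
 * physical address of the next descriptor's TCD and scatter/gather is
 * enabled, so the controller reloads the next TCD by itself; only the last
 * descriptor in the chain raises a "major iteration complete" interrupt.
 */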
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Ack interrupt on all channels */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
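/*
 * Deferred half of the interrupt handling: report any error recorded by the
 * interrupt handler and run completion callbacks for finished descriptors.
 */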
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	uint es;
	int i;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address"
						" Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
						&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
/* Check request completion status */
static enum dma_status
mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
			dma_cookie_t *done, dma_cookie_t *used)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&mchan->lock, flags);
	last_used = mchan->chan.cookie;
	last_complete = mchan->completed_cookie;
	spin_unlock_irqrestore(&mchan->lock, flags);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc)
		return NULL;

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* Use the widest access size that src, dst and len alignment allows */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (IS_ALIGNED(src | dst | len, 16)) {
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
static int __devinit mpc_dma_probe(struct of_device *op,
					const struct of_device_id *match)
{
	struct device_node *dn = op->node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		return retval;
	}

	regs_start = res.start;
	regs_size = res.end - res.start + 1;

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
									mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->chancnt = MPC_DMA_CHANNELS;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_is_tx_complete = mpc_dma_is_tx_complete;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		mchan->chan.chan_id = i;
		mchan->chan.cookie = 1;
		mchan->completed_cookie = mchan->chan.cookie;

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

	/* Disable hardware DMA requests */
	out_be32(&mdma->regs->dmaerqh, 0);
	out_be32(&mdma->regs->dmaerql, 0);

	/* Disable error interrupts */
	out_be32(&mdma->regs->dmaeeih, 0);
	out_be32(&mdma->regs->dmaeeil, 0);

	/* Clear interrupts status */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Route interrupts to IPIC */
	out_be32(&mdma->regs->dmaihsa, 0);
	out_be32(&mdma->regs->dmailsa, 0);

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}
static int __devexit mpc_dma_remove(struct of_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}
static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{},
};

static struct of_platform_driver mpc_dma_driver = {
	.match_table	= mpc_dma_match,
	.probe		= mpc_dma_probe,
	.remove		= __devexit_p(mpc_dma_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init mpc_dma_init(void)
{
	return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);

static void __exit mpc_dma_exit(void)
{
	of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");