/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */
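/*
 * Illustrative client usage only (not part of this driver): since the probe
 * routine below advertises DMA_MEMCPY, a generic dmaengine client could drive
 * a channel of this controller roughly as follows (buffer mapping and error
 * handling omitted, names such as dst_dma/src_dma/len are placeholders):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						   len, DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */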
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)
/* DMA Error Status register (DMAES) bits */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
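/*
 * Note: judging from the values above, the TSIZE codes follow a power-of-two
 * encoding, i.e. the access size in bytes is (1 << code): 0x00 -> 1,
 * 0x01 -> 2, 0x02 -> 4, 0x04 -> 16 and 0x05 -> 32. Code 0x03 (8-byte
 * accesses) is simply not used by this driver.
 */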
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */

	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */

	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */

	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */

	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */

	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */

	u32 reserve0[48];	/* Reserved */

	u8 dchpri[MPC_DMA_CHANNELS];	/* DMA channels(0~63) priority */
};
/* MPC5121 DMA Transfer Control Descriptor (TCD) */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	u32 dlast_sga;		/* Last destination address adjustment /
				 * scatter-gather address
				 */

	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
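/*
 * Worked example (illustrative only): for the single-TCD memcpy built by
 * mpc_dma_prep_memcpy() below, copying 64 bytes between 32-byte aligned
 * buffers would use ssize = dsize = MPC_DMA_TSIZE_32 (as selected by the
 * IS_ALIGNED() checks there), the whole length carried by the "minor" byte
 * count (nbytes = 64) and citer = biter = 1, i.e. a single "major" iteration
 * that moves the entire buffer in 32-byte accesses.
 */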
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	dma_cookie_t			completed_cookie;

	/* Lock for this structure */
	spinlock_t			lock;
};
struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	uint				error_status;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	/* The last descriptor in the chain raises the completion interrupt */
	prev->tcd->start = 0;
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
	out_8(&mdma->regs->dmassrt, cid);
}
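/*
 * Illustrative call pattern only (no additional driver code): the contract
 * documented above is what mpc_dma_tx_submit() below follows, roughly:
 *
 *	spin_lock_irqsave(&mchan->lock, flags);
 *	list_move_tail(&mdesc->node, &mchan->queued);
 *	if (list_empty(&mchan->active))
 *		mpc_dma_execute(mchan);
 *	spin_unlock_irqrestore(&mchan->lock, flags);
 *
 * i.e. the lock is held, the channel is idle (active list empty) and the
 * queued list has just received at least one descriptor.
 */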
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Ack interrupt on all channels */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	uint es;
	int i;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);
	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev,
				"- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev,
				"- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev,
				"- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev,
				"- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev,
				"- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}
	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
						&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
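/*
 * Note: clients written against the generic dmaengine flow still call
 * dma_async_issue_pending() after submitting descriptors; with this driver
 * that call lands in the empty hook above, because mpc_dma_tx_submit() has
 * already pushed the work to the hardware.
 */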
/* Check request completion status */
static enum dma_status
mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
			dma_cookie_t *done, dma_cookie_t *used)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&mchan->lock, flags);
	last_used = mchan->chan.cookie;
	last_complete = mchan->completed_cookie;
	spin_unlock_irqrestore(&mchan->lock, flags);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
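/*
 * Illustrative use only (not part of this driver): a client that kept the
 * cookie returned by tx_submit() can poll completion through the generic
 * helper, which ends up in mpc_dma_is_tx_complete() above, e.g.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
 *	if (status == DMA_SUCCESS)
 *		... the transfer finished and its callback has run ...
 */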
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc)
		return NULL;

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (IS_ALIGNED(src | dst | len, 16)) {
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
static int __devinit mpc_dma_probe(struct of_device *op,
					const struct of_device_id *match)
{
	struct device_node *dn = op->node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		return retval;
	}

	regs_start = res.start;
	regs_size = res.end - res.start + 1;

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
									mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	spin_lock_init(&mdma->error_status_lock);
	dma = &mdma->dma;
	dma->dev = dev;
	dma->chancnt = MPC_DMA_CHANNELS;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_is_tx_complete = mpc_dma_is_tx_complete;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		mchan->chan.chan_id = i;
		mchan->chan.cookie = 1;
		mchan->completed_cookie = mchan->chan.cookie;

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

	/* Disable hardware DMA requests */
	out_be32(&mdma->regs->dmaerqh, 0);
	out_be32(&mdma->regs->dmaerql, 0);

	/* Disable error interrupts */
	out_be32(&mdma->regs->dmaeeih, 0);
	out_be32(&mdma->regs->dmaeeil, 0);

	/* Clear interrupts status */
	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

	/* Route interrupts to IPIC */
	out_be32(&mdma->regs->dmaihsa, 0);
	out_be32(&mdma->regs->dmailsa, 0);

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}
static int __devexit mpc_dma_remove(struct of_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}
static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{},
};

static struct of_platform_driver mpc_dma_driver = {
	.match_table	= mpc_dma_match,
	.probe		= mpc_dma_probe,
	.remove		= __devexit_p(mpc_dma_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
static int __init mpc_dma_init(void)
{
	return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);
static void __exit mpc_dma_exit(void)
{
	of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");