/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not
 *    have external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */
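
/*
 * A minimal consumer-side sketch (not part of this driver): a client would
 * obtain a channel and run a memory to memory transfer through the generic
 * dmaengine API roughly as below. The local variable names (and the dst/src
 * addresses and len) are illustrative only; the dmaengine calls themselves
 * are the stock kernel API.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */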
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000
/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)
/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)
#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
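
/*
 * Note (added for clarity): the TSIZE values above are the eDMA encoding of
 * the transfer chunk size as a power of two, i.e. chunk size = 1 << TSIZE
 * (0 -> 1 byte, 1 -> 2, 2 -> 4, 4 -> 16, 5 -> 32). The 8-byte encoding
 * (0x03) is not used by this driver.
 */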
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channels(0~63) priority */
};
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
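
/*
 * Note (added for clarity): the hardware sees a transfer as a "minor" loop
 * of nbytes bytes that is executed citer/biter times as the "major" loop,
 * so the total byte count of one TCD is nbytes * citer. The memcpy path
 * below uses a single major iteration (citer = biter = 1) with the whole
 * length in nbytes, while the slave path programs maxburst-derived minor
 * loops.
 */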
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						      MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);
	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
									i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;

	return res;
}
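
/*
 * Note (added for clarity): this computes log2 of the bus width, which is
 * exactly the hardware TSIZE encoding defined above, e.g.
 * buswidth_to_dmatsize(1) == MPC_DMA_TSIZE_1, buswidth_to_dmatsize(4) ==
 * MPC_DMA_TSIZE_4 and buswidth_to_dmatsize(32) == MPC_DMA_TSIZE_32.
 */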
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}
static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through: 16 bytes is valid on MPC512x only */
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}
static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 * - only transfers between a peripheral device and memory are
	 *   supported
	 * - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *   are supported, and, consequently, source addresses and
	 *   destination addresses must be aligned accordingly; furthermore,
	 *   for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *   size * maxburst)
	 * - during the transfer, the RAM address is incremented by the size
	 *   of transfer chunk
	 * - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
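
/*
 * A consumer-side sketch (not part of this driver): before preparing slave
 * transfers, a client fills in a struct dma_slave_config honoring the
 * constraints checked above, e.g. for reads from a peripheral FIFO at
 * fifo_addr (illustrative name) in 4-byte chunks:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		... handle the error ...
 */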
static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
						MPC_DMA_DMACR_ERGA |
						MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}
static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}
[] = {
1118 { .compatible
= "fsl,mpc5121-dma", },
1119 { .compatible
= "fsl,mpc8308-dma", },
1122 MODULE_DEVICE_TABLE(of
, mpc_dma_match
);
1124 static struct platform_driver mpc_dma_driver
= {
1125 .probe
= mpc_dma_probe
,
1126 .remove
= mpc_dma_remove
,
1129 .of_match_table
= mpc_dma_match
,
1133 module_platform_driver(mpc_dma_driver
);
1135 MODULE_LICENSE("GPL");
1136 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");