/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}
static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}
/*
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the source address hold transfer size. When address hold is enabled,
 * the DMA reads data from the source address (SA) in a loop: with a loop
 * size of 4, it reads from SA, SA + 1, SA + 2, SA + 3, then wraps back to
 * SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/*
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the destination address hold transfer size. When address hold is
 * enabled, the DMA writes data to the destination address (TA) in a loop:
 * with a loop size of 4, it writes to TA, TA + 1, TA + 2, TA + 3, then wraps
 * back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
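/*
 * Illustrative sketch (not part of the driver): a caller that already holds a
 * struct fsldma_chan could program a 4-byte source hold window like this,
 * assuming the channel is otherwise idle:
 *
 *	chan->set_src_loop_size(chan, 4);	// FSL_DMA_MR_SAHE + __ilog2(4) = 2
 *	chan->set_dst_loop_size(chan, 0);	// destination advances normally
 *
 * With a loop size of 4, reads cycle through SA, SA + 1, SA + 2, SA + 3 and
 * wrap; only sizes 0, 1, 2, 4 and 8 are handled by the switches above.
 */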
/*
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
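/*
 * Worked example for the encoding above (illustrative): a request count of
 * 64 bytes programs __ilog2(64) = 6 into bits 24-27 of the mode register, so
 * the channel moves 64 bytes per DREQ# assertion before pausing:
 *
 *	chan->set_request_count(chan, 64);
 *
 * Non-power-of-two sizes are rounded down by __ilog2(); the documented
 * maximum is 1024 bytes.
 */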
/*
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/*
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel will wait for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
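/*
 * Illustrative sketch (not part of the driver): a board-specific client that
 * wires up DREQ# could arm both external controls before submitting work:
 *
 *	fsl_chan_toggle_ext_pause(chan, 1);	// pause after each request count
 *	fsl_chan_toggle_ext_start(chan, 1);	// wait for the external start pin
 *
 * dma_start() then sets MR[EMS_EN] instead of MR[CS], so the transfer begins
 * only once the external start pin is asserted.
 */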
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
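/*
 * Illustration of the splice above: if ld_pending already ends in descriptor
 * B (EOL set) and the new transaction's tx_list is C -> D (EOL on D), then
 * after append_ld_queue() the hardware chain is A -> B -> C -> D (EOL only on
 * D) and C, D have been moved onto ld_pending.
 */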
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
/*
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}
/*
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}
/*
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
/*
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
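/*
 * Example client usage (a sketch against the generic dmaengine API, assuming
 * the channel was obtained with dma_request_channel() and both buffers are
 * already DMA-mapped; error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Copies longer than FSL_DMA_BCR_MAX_CNT are split into a chain of link
 * descriptors by the loop above, but the client still sees a single cookie.
 */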
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
/*
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller.
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}
static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
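/*
 * Example for the DMA_SLAVE_CONFIG path above (illustrative): 4-byte accesses
 * with a burst of 16 give a 64-byte request count:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(dchan, &cfg);	// ends in set_request_count(chan, 64)
 */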
/*
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}
/*
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);
	chan->idle = false;
}
/*
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/*
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_complete;
	dma_cookie_t last_used;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	last_complete = chan->completed_cookie;
	last_used = dchan->cookie;

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}
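/*
 * Example (illustrative): a client normally polls completion through the
 * generic wrapper, which lands in fsl_tx_status() above:
 *
 *	if (dma_async_is_tx_complete(dchan, cookie, NULL, NULL) == DMA_SUCCESS)
 *		;	// the descriptor has completed and its callback has run
 */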
/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;

		chan->completed_cookie = cookie;
		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}
static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}
/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
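/*
 * Worked example for the channel id calculation above: the per-channel
 * register blocks start at controller offset 0x100 and are 0x80 bytes apart,
 * so a channel whose 'reg' resource starts at offset 0x180 yields
 * ((0x180 - 0x100) & 0xfff) >> 7 = 1, i.e. fdev->chan[1].
 */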
static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}
static int __devinit fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}
static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}
static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");