/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				absync;
	int				cyclic;
	int				pset_nr;
	int				processed;
	struct edmacc_param		pset[0];
};

struct edma_cc;
struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};
struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				dummy_slot;
};
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}
static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}
static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If either we processed all psets or we're still not started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"pset[%d]: chnum %d slot %d abcnt %08x bidx %08x cidx %08x lkrld %08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set of a transaction, set up the closing link:
	 * cyclic transfers link back to the first slot so events keep
	 * recycling the ring, while SG-list transactions link to the dummy
	 * slot, which absorbs all future events, and that's OK because
	 * we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	edma_resume(echan->ch_num);

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG at most.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}
static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after this function returns (even if it is, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	/* 64-bit element sizes are not supported */
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
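
/*
 * Illustrative sketch (not part of this driver): client drivers reach
 * edma_control() through the generic dmaengine wrappers rather than
 * calling it directly. The FIFO address and burst size below are made-up
 * placeholders:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	(maps to DMA_SLAVE_CONFIG)
 *	dmaengine_terminate_all(chan);		(maps to DMA_TERMINATE_ALL)
 */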
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of each device access
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in A-sync transfers only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K - 1. This
		 * is assured because bcntrld is set to 0xffff at the end
		 * of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	if (direction == DMA_MEM_TO_DEV) {
		/* Memory side strides through the buffer, device side stays on the FIFO */
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		pset->opt |= SYNCDIM;

	pset->src = src_addr;
	pset->dst = dst_addr;

	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	pset->a_b_cnt = bcnt << 16 | acnt;
	pset->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	pset->link_bcntrld = 0xffffffff;

	return absync;
}
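
/*
 * Worked example for the A-sync split above (illustrative numbers, not
 * taken from any particular board): with dev_width = 4 bytes and
 * dma_length = 1 MiB there are 262144 elements to move.
 *
 *	ccnt = 262144 / 65535 = 4		(quotient)
 *	bcnt = 262144 - 4 * 65535 = 4		(remainder, non-zero, so ccnt becomes 5)
 *
 * The first frame therefore carries the 4-element remainder; bcntrld then
 * reloads bcnt to 65535 for the remaining 4 frames, giving
 * 4 + 4 * 65535 = 262144 elements in total.
 */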
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate PaRAM slots, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
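
/*
 * Illustrative sketch (not part of this driver): a typical slave-SG client
 * maps its scatterlist and then goes through the generic dmaengine calls,
 * roughly as below (the sg table and callback names are hypothetical):
 *
 *	nents = dma_map_sg(chan->device->dev, sgt.sgl, sgt.nents,
 *			   DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgt.sgl, nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = xfer_done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * edma_issue_pending() then kicks edma_execute(), which programs up to
 * MAX_NR_SG PaRAM sets at a time as described above.
 */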
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;

	dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
	dev_dbg(dev, "%s: period_len=%zu\n", __func__, period_len);
	dev_dbg(dev, "%s: buf_len=%zu\n", __func__, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}

		/* The last set simply reuses the first set's configuration */
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_dbg(dev,
			"pset[%d]: chnum %d slot %d abcnt %08x bidx %08x cidx %08x lkrld %08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable interrupts for every period because the callback
		 * has to be called for every period.
		 */
		edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
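
/*
 * Illustrative sketch (not part of this driver): an audio client such as
 * davinci-pcm ends up here through the generic cyclic helper, roughly as
 * below (the buffer, period and callback names are hypothetical):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, runtime_dma_addr,
 *					 buffer_bytes, period_bytes,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_elapsed_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Because every period's PaRAM set has TCINTEN set above, the callback
 * fires once per period, which is what ALSA needs to advance its ring.
 */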
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || !edesc->cyclic)
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
				edma_execute(echan);
			}
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as either:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished the current transfer and issuing
		 *     will call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int a_ch_num;
	int ret;

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel for %u:%u\n",
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
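
/*
 * Illustrative sketch (not part of this driver): board or client code
 * typically hands edma_filter_fn to dma_request_channel() together with
 * the EDMA channel/event number it needs. The channel number below is a
 * made-up placeholder:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = EDMA_CTLR_CHAN(0, 12);	(hypothetical event)
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 */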
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);
static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");