// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}
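
/*
 * Descriptor memory model: a dw_edma_desc owns a linked list of chunks and
 * each chunk owns a linked list of bursts.  A chunk maps to one pass over
 * the channel's hardware linked-list (LL) window, while a burst carries a
 * single source address, destination address and transfer size.  The
 * helpers below build that hierarchy with GFP_NOWAIT allocations, since
 * they run from the descriptor preparation path.
 */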
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggle the change bit (CB) in each chunk: this is how the driver
	 * informs the eDMA HW block that a new linked list is ready to be
	 * consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}
static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;

	/* Create and add new element into the linked list */
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}
static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}
static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}
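
/*
 * Program the next chunk of the descriptor at the head of the virtual
 * channel into the eDMA core and then release that chunk.  The
 * !desc->xfer_sz argument lets dw_edma_v0_core_start() distinguish the
 * first chunk of a transfer from a follow-up one.
 */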
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}
static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}
static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}
static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}
static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state; it probably missed
		 * or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}
static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	return ret;
}
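
/*
 * Common preparation path for both scatter-gather and cyclic transfers.
 * The switch below accepts two configurations: "local dma", where the
 * requested direction matches the channel direction (DEV_TO_MEM on a read
 * channel, MEM_TO_DEV on a write channel), and "remote dma", where the
 * mapping is reversed.  Any other combination is rejected.
 */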
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	if (!chan->configured)
		return NULL;

	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local dma */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (chan->dir == EDMA_DIR_WRITE) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = dst_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's a linear
				 * memory, and that is why the source and
				 * destination addresses are increased by
				 * the same portion (data length)
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = src_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's a linear
				 * memory, and that is why the source and
				 * destination addresses are increased by
				 * the same portion (data length)
				 */
			}
		}

		if (!xfer->cyclic) {
			src_addr += sg_dma_len(sg);
			dst_addr += sg_dma_len(sg);
			sg = sg_next(sg);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}
static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.cyclic = false;

	return dw_edma_device_transfer(&xfer);
}
static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.cyclic = true;

	return dw_edma_device_transfer(&xfer);
}
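
/*
 * Completion ("done") interrupt handling: depending on the request pending
 * on the channel, either start the next chunk of the current descriptor,
 * complete the cookie and go back to idle, or acknowledge a pause/stop
 * previously issued through device_pause()/device_terminate_all().
 */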
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
						      EDMA_DIR_WRITE :
						      EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
						       EDMA_DIR_WRITE :
						       EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}
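
/*
 * Per-direction channel setup.  The linked-list (LL) and data regions
 * provided by the chip driver are divided by the rounded-up power of two
 * of the total channel count, so every channel gets a private ll_off/dt_off
 * window and an ll_max burst budget derived from EDMA_LL_SZ.  Each channel
 * is also tied to one of the requested IRQ vectors, and one dma_device is
 * registered per direction (wr_edma/rd_edma).
 */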
static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}
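
/*
 * IRQ distribution example: with 4 write + 4 read channels and 3 IRQ
 * vectors, the round-robin loop in dw_edma_irq_request() below ends up
 * with wr_alloc = 2 and rd_alloc = 1, i.e. vectors 0-1 service the write
 * channels and vector 2 services all the read channels.
 * dw_edma_channel_setup() then picks a channel's vector with
 * pos = off_alloc + (j % alloc).
 */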
static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, irq, err = 0;
	u32 ch_cnt;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}
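
/*
 * Probe sequence: validate the chip-provided dw_edma block, read the write
 * and read channel counts from the hardware, allocate the channel array,
 * force the controller off to start from a clean state, request the IRQ
 * vectors and register one dma_device per direction.  If channel setup
 * fails, every previously requested IRQ is released again.
 */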
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	/* Find out how many write channels are supported by hardware */
	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
	if (!dw->wr_ch_cnt)
		return -EINVAL;

	/* Find out how many read channels are supported by hardware */
	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
	if (!dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	dma_async_device_unregister(&dw->rd_edma);

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off();

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");