// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

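/*
 * Channel start/stop goes through the eDMA "set/clear" service registers:
 * SEEI sets the per-channel error interrupt enable and SERQ sets the
 * hardware request enable bit; CERQ/CEEI clear them again. On v1
 * (Vybrid/mpc577x) the accesses use edma_writeb() to follow the engine's
 * endian model, while v2 (ColdFire) uses plain iowrite8().
 */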
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

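/*
 * Route (or un-route) a DMAMUX request source ("slot") to this channel.
 * Each DMAMUX instance serves n_chans / DMAMUX_NR channels, so the byte
 * offset of the channel's mux register is the channel id modulo that count.
 */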
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (enable)
		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
	else
		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

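/* Map a dmaengine bus width onto the TCD ATTR source/destination size encoding. */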
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

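/*
 * The slave FIFO address from dma_slave_config is a physical address; it is
 * mapped with dma_map_resource() before being programmed into a TCD and is
 * unmapped again when the channel is reconfigured or released.
 */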
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

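/*
 * Residue bookkeeping: sum the size of every TCD in the descriptor, then,
 * for an in-flight transfer, read the channel's current source (mem-to-dev)
 * or destination (dev-to-mem) address and subtract what has already been
 * transferred up to the TCD that contains that address.
 */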
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

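/*
 * Load a software TCD into the channel's hardware TCD registers. CSR is
 * cleared first and written last with the descriptor's real value, so the
 * channel only sees a fully programmed TCD.
 */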
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);
	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
			&regs->tcd[ch].dlast_sga);

	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}

void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

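/* Allocate a descriptor plus sg_len hardware TCDs from the channel's DMA pool. */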
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

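/*
 * Cyclic transfer: one TCD per period, with each TCD's dlast_sga pointing at
 * the next one and the last wrapping back to the first, so the transfer loops
 * until the channel is stopped. A major-loop interrupt is requested for every
 * period.
 */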
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

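/*
 * Slave scatter/gather transfer: one TCD per scatterlist entry, chained via
 * scatter/gather linking. Only the final TCD raises the major-loop interrupt
 * and disables the hardware request when it completes.
 */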
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

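/*
 * Pull the next queued virtual descriptor (if any), load its first TCD into
 * the hardware and enable the channel's request line.
 */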
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				&dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
 * register offsets are different compared to ColdFire mcf5441x 64 channels
 * edma (here called "v2").
 *
 * This function sets up register offsets as per proper declared version
 * so must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->version == v1) ?
			EDMA_SERQ : EDMA64_SERQ);
	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
			EDMA_CERQ : EDMA64_CERQ);
	edma->regs.seei = edma->membase + ((edma->version == v1) ?
			EDMA_SEEI : EDMA64_SEEI);
	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
			EDMA_CEEI : EDMA64_CEEI);
	edma->regs.cint = edma->membase + ((edma->version == v1) ?
			EDMA_CINT : EDMA64_CINT);
	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
			EDMA_CERR : EDMA64_CERR);
	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
			EDMA_SSRT : EDMA64_SSRT);
	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
			EDMA_CDNE : EDMA64_CDNE);
	edma->regs.intl = edma->membase + ((edma->version == v1) ?
			EDMA_INTR : EDMA64_INTL);
	edma->regs.errl = edma->membase + ((edma->version == v1) ?
			EDMA_ERR : EDMA64_ERRL);

	if (edma->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");