// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000
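
/*
 * Let the eDMA engine honour hardware requests for this channel by writing
 * its channel number to the SEEI (set enable error interrupt) and SERQ
 * (set enable request) registers.
 */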
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
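
/*
 * The DMAMUX routes a peripheral request "slot" onto an eDMA channel.
 * mux_configure8() programs one 8-bit mux register; fsl_edma_chan_mux()
 * works out which mux instance and register offset belong to this channel.
 */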
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
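
/* Translate a dmaengine bus width into the TCD SSIZE/DSIZE attribute bits. */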
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
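
/*
 * Stop the channel, drop the descriptor currently in flight and free every
 * queued virt-dma descriptor.
 */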
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}
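
/*
 * Map the peripheral FIFO register from the slave config as a DMA resource
 * so the engine can address it. The mapping is cached per direction and is
 * only redone when the transfer direction changes.
 */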
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
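
/*
 * Residue calculation: sum nbytes * biter over every TCD of the descriptor,
 * then, if the transfer is in progress, walk the TCD list subtracting the
 * completed TCDs and, for the TCD the engine is currently in (found via the
 * current source/destination address), keep only the part not yet moved.
 */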
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
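
/*
 * Load one TCD into the channel's TCD registers. CSR is cleared first and
 * written last, so the channel is only armed once the rest of the TCD is in
 * place.
 */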
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);
	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
		    &regs->tcd[ch].dlast_sga);

	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}
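
/*
 * Allocate a descriptor carrying sg_len TCDs from the channel's DMA pool;
 * on allocation failure the TCDs obtained so far are returned to the pool.
 */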
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
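
/*
 * Build a cyclic (ring) descriptor: one TCD per period, each chained to the
 * next via dlast_sga and raising a major-loop interrupt, with the last TCD
 * pointing back at the first.
 */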
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
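
/*
 * Build a scatter-gather descriptor: one TCD per sg entry, chained via
 * dlast_sga; only the final TCD raises the major-loop interrupt and drops
 * the hardware request when it completes.
 */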
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
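
/* Pop the next queued descriptor, program its first TCD and start the channel. */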
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
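
/* Create the per-channel DMA pool that TCDs are allocated from. */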
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"), the
 * register offsets differ from those of the 64-channel ColdFire mcf5441x
 * eDMA (here called "v2").
 *
 * This function sets up the register offsets for the declared version, so
 * it must be called in xxx_edma_probe() just after setting the edma
 * "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
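
/*
 * A minimal probe-order sketch (the helper names below are illustrative,
 * not taken from this file):
 *
 *	fsl_edma->drvdata = of_device_get_match_data(&pdev->dev);
 *	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	fsl_edma_setup_regs(fsl_edma);	// only after version/membase are set
 */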

MODULE_LICENSE("GPL v2");