// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000
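
/*
 * Enable servicing of hardware requests for this channel: set the
 * channel's error-interrupt enable bit (SEEI) and its request enable
 * bit (SERQ). On the v1 (Vybrid/mpc577x) engine this goes through the
 * endian-aware edma_writeb() helpers; ColdFire accesses its peripherals
 * natively big endian, so plain iowrite8() is enough there.
 */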
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->drvdata->version == v1) {
                edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
                edma_writeb(fsl_chan->edma, ch, regs->serq);
        } else {
                /* ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
                iowrite8(ch, regs->serq);
        }
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->drvdata->version == v1) {
                edma_writeb(fsl_chan->edma, ch, regs->cerq);
                edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
        } else {
                /* ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(ch, regs->cerq);
                iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
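
/*
 * DMAMUX helpers: each mux channel has a CHCFG field holding the
 * request-source slot plus an enable bit. Older muxes use one byte per
 * channel (mux_configure8); v3 parts use one 32-bit register per
 * channel with the enable bit in the top byte (mux_configure32).
 */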
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                           u32 off, u32 slot, bool enable)
{
        u8 val8;

        if (enable)
                val8 = EDMAMUX_CHCFG_ENBL | slot;
        else
                val8 = EDMAMUX_CHCFG_DIS;

        iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                            u32 off, u32 slot, bool enable)
{
        u32 val;

        if (enable)
                val = EDMAMUX_CHCFG_ENBL << 24 | slot;
        else
                val = EDMAMUX_CHCFG_DIS;

        iowrite32(val, addr + off * 4);
}
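
/*
 * Route a request slot to this channel's DMAMUX entry. On parts flagged
 * with drvdata->mux_swap the per-channel mux bytes are swapped within
 * each 32-bit word, so endian_diff[] corrects the byte offset before
 * the write.
 */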
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                       unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;
        int endian_diff[4] = {3, 1, -1, -3};
        u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

        chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

        if (fsl_chan->edma->drvdata->mux_swap)
                ch_off += endian_diff[ch_off % 4];

        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);

        if (fsl_chan->edma->drvdata->version == v3)
                mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
        else
                mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
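
/*
 * Translate a dma_slave_buswidth into the TCD ATTR source/destination
 * size encoding (e.g. a 4-byte bus width becomes 32-bit SSIZE/DSIZE);
 * unknown widths fall back to 32-bit transfers.
 */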
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        switch (addr_width) {
        case 1:
                return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
        case 2:
                return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
        case 4:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        case 8:
                return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
        default:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        }
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
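
/*
 * device_terminate_all callback: stop the hardware request, drop the
 * active descriptor and free everything still queued on the virtual
 * channel.
 */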
int fsl_edma_terminate_all(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->edesc = NULL;
        fsl_chan->idle = true;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->status = DMA_PAUSED;
                fsl_chan->idle = true;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_enable_request(fsl_chan);
                fsl_chan->status = DMA_IN_PROGRESS;
                fsl_chan->idle = false;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
        if (fsl_chan->dma_dir != DMA_NONE)
                dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
                                   fsl_chan->dma_dev_addr,
                                   fsl_chan->dma_dev_size,
                                   fsl_chan->dma_dir, 0);
        fsl_chan->dma_dir = DMA_NONE;
}
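
/*
 * Map the peripheral FIFO address taken from dma_slave_config with
 * dma_map_resource() and cache the mapping in the channel; it is only
 * redone when the transfer direction changes.
 */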
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
                                    enum dma_transfer_direction dir)
{
        struct device *dev = fsl_chan->vchan.chan.device->dev;
        enum dma_data_direction dma_dir;
        phys_addr_t addr = 0;
        u32 size = 0;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                dma_dir = DMA_FROM_DEVICE;
                addr = fsl_chan->cfg.dst_addr;
                size = fsl_chan->cfg.dst_maxburst;
                break;
        case DMA_DEV_TO_MEM:
                dma_dir = DMA_TO_DEVICE;
                addr = fsl_chan->cfg.src_addr;
                size = fsl_chan->cfg.src_maxburst;
                break;
        default:
                dma_dir = DMA_NONE;
                break;
        }

        /* Already mapped for this config? */
        if (fsl_chan->dma_dir == dma_dir)
                return true;

        fsl_edma_unprep_slave_dma(fsl_chan);

        fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
        if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
                return false;
        fsl_chan->dma_dev_size = size;
        fsl_chan->dma_dir = dma_dir;

        return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
        fsl_edma_unprep_slave_dma(fsl_chan);

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
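
/*
 * The residue is the sum of nbytes * biter over the TCDs that have not
 * completed yet. For an in-flight descriptor the TCD currently being
 * executed is found by comparing the hardware saddr/daddr with each
 * TCD's programmed address window.
 */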
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                                    struct virt_dma_desc *vdesc, bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;
        enum dma_transfer_direction dir = edesc->dirn;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        int i;

        /* calculate the total size in this desc */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
                len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
        else
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

        /* figure out the finished and calculate the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);
                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
                else
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

                len -= size;
                if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                                  struct fsl_edma_hw_tcd *tcd)
{
        struct fsl_edma_engine *edma = fsl_chan->edma;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
         * big- or little-endian obeying the eDMA engine model endian,
         * and this is performed from specific edma_write functions
         */
        edma_writew(edma, 0, &regs->tcd[ch].csr);

        edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
        edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

        edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
        edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

        edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
        edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

        edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
        edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
        edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

        edma_writel(edma, (s32)tcd->dlast_sga,
                        &regs->tcd[ch].dlast_sga);

        edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                       bool disable_req, bool enable_sg)
{
        u16 csr = 0;

        /*
         * eDMA hardware SGs require the TCDs to be stored in little
         * endian format irrespective of the register endian model.
         * So we put the value in little endian in memory, waiting
         * for fsl_edma_set_tcd_regs doing the swap.
         */
        tcd->saddr = cpu_to_le32(src);
        tcd->daddr = cpu_to_le32(dst);

        tcd->attr = cpu_to_le16(attr);

        tcd->soff = cpu_to_le16(soff);

        tcd->nbytes = cpu_to_le32(nbytes);
        tcd->slast = cpu_to_le32(slast);

        tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
        tcd->doff = cpu_to_le16(doff);

        tcd->dlast_sga = cpu_to_le32(dlast_sga);

        tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;

        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;

        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        tcd->csr = cpu_to_le16(csr);
}
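
/*
 * Allocate a descriptor carrying sg_len TCDs, each backed by an entry
 * from the channel's TCD pool; on partial failure the TCDs already
 * allocated are returned to the pool.
 */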
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                                                 int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}
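
/*
 * Cyclic transfers are built as a ring of TCDs: each TCD covers one
 * period, raises a major-loop interrupt and scatter/gathers (dlast_sga)
 * to the next TCD, with the last one linking back to the first.
 */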
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        sg_len = buf_len / period_len;
        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;
        fsl_desc->dirn = direction;

        dma_buf_next = dma_addr;
        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = dma_buf_next;
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                                  fsl_chan->attr, soff, nbytes, 0, iter,
                                  iter, doff, last_sg, true, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
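
/*
 * Slave scatter-gather: one TCD per SG element, chained via dlast_sga.
 * Only the final TCD raises the completion interrupt and clears the
 * hardware request (D_REQ) instead of linking to a further TCD.
 */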
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;
        fsl_desc->dirn = direction;

        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        for_each_sg(sgl, sg, sg_len, i) {
                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        last_sg = fsl_desc->tcd[(i + 1)].ptcd;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          false, false, true);
                } else {
                        last_sg = 0;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct virt_dma_desc *vdesc;

        lockdep_assert_held(&fsl_chan->vchan.lock);

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
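
/*
 * device_issue_pending callback: move submitted descriptors to the
 * issued list and, if the channel is idle and not suspended, start the
 * first one right away.
 */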
void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (unlikely(fsl_chan->pm_state != RUNNING)) {
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                /* cannot submit due to suspend */
                return;
        }

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
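
/*
 * Per-channel TCD pool. The hardware loads TCDs directly during
 * scatter/gather, hence the explicit 32-byte alignment passed to
 * dma_pool_create() below.
 */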
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                sizeof(struct fsl_edma_hw_tcd),
                                32, 0);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        fsl_edma_unprep_slave_dma(fsl_chan);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_edma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                        &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA (here called "v1") the register
 * offsets differ from the 64-channel ColdFire mcf5441x eDMA (here called
 * "v2").
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called from xxx_edma_probe() just after the edma
 * "version" and "membase" have been set.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
        edma->regs.cr = edma->membase + EDMA_CR;
        edma->regs.es = edma->membase + EDMA_ES;
        edma->regs.erql = edma->membase + EDMA_ERQ;
        edma->regs.eeil = edma->membase + EDMA_EEI;

        edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SERQ : EDMA_SERQ);
        edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CERQ : EDMA_CERQ);
        edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SEEI : EDMA_SEEI);
        edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CEEI : EDMA_CEEI);
        edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CINT : EDMA_CINT);
        edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CERR : EDMA_CERR);
        edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SSRT : EDMA_SSRT);
        edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CDNE : EDMA_CDNE);
        edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_INTL : EDMA_INTR);
        edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_ERRL : EDMA_ERR);

        if (edma->drvdata->version == v2) {
                edma->regs.erqh = edma->membase + EDMA64_ERQH;
                edma->regs.eeih = edma->membase + EDMA64_EEIH;
                edma->regs.errh = edma->membase + EDMA64_ERRH;
                edma->regs.inth = edma->membase + EDMA64_INTH;
        }

        edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");