// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

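/*
 * Enable the peripheral service request and the error interrupt for this
 * channel. The v1 (Vybrid/mpc577x) block is accessed through the
 * endian-aware edma_writeb() helpers, while the ColdFire block is written
 * directly with iowrite8(). fsl_edma_disable_request() below undoes both.
 */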
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->drvdata->version == v1) {
                edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
                edma_writeb(fsl_chan->edma, ch, regs->serq);
        } else {
                /* ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
                iowrite8(ch, regs->serq);
        }
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_chan->edma->drvdata->version == v1) {
                edma_writeb(fsl_chan->edma, ch, regs->cerq);
                edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
        } else {
                /* ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(ch, regs->cerq);
                iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                           u32 off, u32 slot, bool enable)
{
        u8 val8;

        if (enable)
                val8 = EDMAMUX_CHCFG_ENBL | slot;
        else
                val8 = EDMAMUX_CHCFG_DIS;

        iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                            u32 off, u32 slot, bool enable)
{
        u32 val;

        if (enable)
                val = EDMAMUX_CHCFG_ENBL << 24 | slot;
        else
                val = EDMAMUX_CHCFG_DIS;

        iowrite32(val, addr + off * 4);
}

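/*
 * Route (or un-route, when enable is false) the given request slot to this
 * channel through the DMAMUX. v3 controllers use 32-bit mux registers,
 * earlier ones use 8-bit registers.
 */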
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                       unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;
        u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

        chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);

        if (fsl_chan->edma->drvdata->version == v3)
                mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
        else
                mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

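/*
 * Translate a dmaengine bus width into the matching TCD ATTR source and
 * destination size fields; unknown widths fall back to 32-bit accesses.
 */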
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        switch (addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
        default:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        }
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->edesc = NULL;
        fsl_chan->idle = true;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->status = DMA_PAUSED;
                fsl_chan->idle = true;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_enable_request(fsl_chan);
                fsl_chan->status = DMA_IN_PROGRESS;
                fsl_chan->idle = false;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
        if (fsl_chan->dma_dir != DMA_NONE)
                dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
                                   fsl_chan->dma_dev_addr,
                                   fsl_chan->dma_dev_size,
                                   fsl_chan->dma_dir, 0);
        fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
                                    enum dma_transfer_direction dir)
{
        struct device *dev = fsl_chan->vchan.chan.device->dev;
        enum dma_data_direction dma_dir;
        phys_addr_t addr = 0;
        u32 size = 0;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                dma_dir = DMA_FROM_DEVICE;
                addr = fsl_chan->cfg.dst_addr;
                size = fsl_chan->cfg.dst_maxburst;
                break;
        case DMA_DEV_TO_MEM:
                dma_dir = DMA_TO_DEVICE;
                addr = fsl_chan->cfg.src_addr;
                size = fsl_chan->cfg.src_maxburst;
                break;
        default:
                dma_dir = DMA_NONE;
                break;
        }

        /* Already mapped for this config? */
        if (fsl_chan->dma_dir == dma_dir)
                return true;

        fsl_edma_unprep_slave_dma(fsl_chan);

        fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
        if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
                return false;
        fsl_chan->dma_dev_size = size;
        fsl_chan->dma_dir = dma_dir;

        return true;
}

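/*
 * fsl_edma_slave_config() is reached through dmaengine_slave_config().
 * For illustration only, a client might fill the config roughly as in the
 * sketch below; the FIFO address, width and burst values are assumptions,
 * not taken from this driver:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = rx_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */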
int fsl_edma_slave_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
        fsl_edma_unprep_slave_dma(fsl_chan);

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

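/*
 * Compute how many bytes of the descriptor are still outstanding: sum the
 * size of every TCD, then, for an in-progress transfer, subtract the TCDs
 * already completed based on the current source/destination address read
 * back from the hardware.
 */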
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                struct virt_dma_desc *vdesc, bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;
        enum dma_transfer_direction dir = edesc->dirn;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        int i;

        /* calculate the total size in this desc */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
                len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
        else
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

        /* figure out the finished and calculate the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
                        * le16_to_cpu(edesc->tcd[i].vtcd->biter);
                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
                else
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

                len -= size;
                if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                                  struct fsl_edma_hw_tcd *tcd)
{
        struct fsl_edma_engine *edma = fsl_chan->edma;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
         * big- or little-endian obeying the eDMA engine model endian.
         */
        edma_writew(edma, 0, &regs->tcd[ch].csr);
        edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
        edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

        edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
        edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

        edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
        edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

        edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
        edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
        edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

        edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
                        &regs->tcd[ch].dlast_sga);

        edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}

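/*
 * Build one in-memory TCD. The CSR flags select a major-loop-complete
 * interrupt (major_int), clearing of the hardware request on completion
 * (disable_req), and scatter/gather chaining to the TCD pointed at by
 * dlast_sga (enable_sg).
 */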
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                       bool disable_req, bool enable_sg)
{
        u16 csr = 0;

        /*
         * eDMA hardware SGs require the TCDs to be stored in little
         * endian format irrespective of the register endian model.
         * So we put the value in little endian in memory, waiting
         * for fsl_edma_set_tcd_regs doing the swap.
         */
        tcd->saddr = cpu_to_le32(src);
        tcd->daddr = cpu_to_le32(dst);

        tcd->attr = cpu_to_le16(attr);

        tcd->soff = cpu_to_le16(soff);

        tcd->nbytes = cpu_to_le32(nbytes);
        tcd->slast = cpu_to_le32(slast);

        tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
        tcd->doff = cpu_to_le16(doff);

        tcd->dlast_sga = cpu_to_le32(dlast_sga);

        tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;

        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;

        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                                                 int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}

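/*
 * Build a cyclic descriptor: one TCD per period, each chained to the next
 * through scatter/gather, with the last TCD pointing back at the first so
 * the transfer wraps around until it is terminated.
 */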
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        sg_len = buf_len / period_len;
        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;
        fsl_desc->dirn = direction;

        dma_buf_next = dma_addr;
        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = dma_buf_next;
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                                  fsl_chan->attr, soff, nbytes, 0, iter,
                                  iter, doff, last_sg, true, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;
        fsl_desc->dirn = direction;

        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        for_each_sg(sgl, sg, sg_len, i) {
                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                }

                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        last_sg = fsl_desc->tcd[(i + 1)].ptcd;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          false, false, true);
                } else {
                        last_sg = 0;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

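/*
 * Load the first TCD of the next queued descriptor into the hardware and
 * kick the channel. Called with the virtual channel lock held, as in
 * fsl_edma_issue_pending() below.
 */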
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct virt_dma_desc *vdesc;

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (unlikely(fsl_chan->pm_state != RUNNING)) {
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                /* cannot submit due to suspend */
                return;
        }

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                sizeof(struct fsl_edma_hw_tcd),
                                32, 0);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        fsl_edma_unprep_slave_dma(fsl_chan);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_edma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
 * register offsets are different compared to ColdFire mcf5441x 64 channels
 * edma (here called "v2").
 *
 * This function sets up the register offsets per the declared version, so
 * it must be called in xxx_edma_probe() just after setting the edma
 * "version" and "membase" appropriately.
 */
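/*
 * For illustration only, the expected probe-time ordering looks roughly
 * like the sketch below; the resource and match-data handling around it is
 * an assumption belonging to the platform glue, not to this file:
 *
 *	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
 *	fsl_edma->drvdata = of_device_get_match_data(&pdev->dev);
 *	fsl_edma_setup_regs(fsl_edma);
 *	regs = &fsl_edma->regs;
 */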
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
        edma->regs.cr = edma->membase + EDMA_CR;
        edma->regs.es = edma->membase + EDMA_ES;
        edma->regs.erql = edma->membase + EDMA_ERQ;
        edma->regs.eeil = edma->membase + EDMA_EEI;

        edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SERQ : EDMA_SERQ);
        edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CERQ : EDMA_CERQ);
        edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SEEI : EDMA_SEEI);
        edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CEEI : EDMA_CEEI);
        edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CINT : EDMA_CINT);
        edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CERR : EDMA_CERR);
        edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_SSRT : EDMA_SSRT);
        edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_CDNE : EDMA_CDNE);
        edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_INTL : EDMA_INTR);
        edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
                        EDMA64_ERRL : EDMA_ERR);

        if (edma->drvdata->version == v2) {
                edma->regs.erqh = edma->membase + EDMA64_ERQH;
                edma->regs.eeih = edma->membase + EDMA64_EEIH;
                edma->regs.errh = edma->membase + EDMA64_ERRH;
                edma->regs.inth = edma->membase + EDMA64_INTH;
        }

        edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");