// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c
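
/*
 * Per-channel transfer-complete handler, called from the eDMA interrupt
 * path. It completes the current descriptor (or fires the cyclic callback)
 * and, once the channel is idle, kicks off the next queued descriptor.
 */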
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	if (fsl_chan->is_rxchan)
		val |= EDMA_V3_CH_SBR_RD;
	else
		val |= EDMA_V3_CH_SBR_WR;

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: With the exception of 0, attempts to write a value
		 * already in use will be forced to 0.
		 */
		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}
static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}
static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}
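
/*
 * Route the given request source ("slot") to this channel through the
 * DMAMUX. On FSL_EDMA_DRV_MUX_SWAP hardware the per-channel mux bytes are
 * swapped within each 32-bit word, hence the endian_diff[] offset
 * correction applied to ch_off.
 */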
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
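
/*
 * Translate a dma_slave_buswidth into the TCD ATTR value: the log2 of the
 * access size, replicated into both the source (SSIZE) and destination
 * (DSIZE) size fields.
 */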
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->status = DMA_COMPLETE;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}
int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}
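
/*
 * Map the peripheral FIFO address with dma_map_resource() and cache the
 * mapping in the channel, so that repeated slave transfers in the same
 * direction can reuse it; fsl_edma_unprep_slave_dma() drops the mapping.
 */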
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
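
/*
 * Residue accounting: sum the minor-loop byte count (NBYTES * BITER) of
 * every TCD in the descriptor, then, for an in-flight transfer, subtract
 * the TCDs already consumed by comparing the current hardware source or
 * destination address against each TCD's address window.
 */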
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr, old_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
	}

	if (!in_progress)
		return len;

	/* a 64-bit read is not atomic; retry if the high 32 bits changed */
	do {
		if (dir == DMA_MEM_TO_DEV) {
			old_addr = edma_read_tcdreg(fsl_chan, saddr);
			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
		} else {
			old_addr = edma_read_tcdreg(fsl_chan, daddr);
			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
		}
	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));

	/* figure out which TCDs have finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
		else
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed by the specific edma_write functions.
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);

	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);

	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
	}

	/*
	 * Must clear the CHn_CSR[DONE] bit before enabling TCDn_CSR[ESG] on
	 * eDMAv3; eDMAv4 has no such requirement.
	 * Changing MLINK requires clearing CHn_CSR[DONE] on both eDMAv3 and
	 * eDMAv4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
		(csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
		(csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u16 csr = 0;
	u32 burst;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs to do the swap.
	 */
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);

	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple fifo */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
				cfg->src_maxburst : cfg->dst_maxburst;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		} else {
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);

	trace_edma_fill_tcd(fsl_chan, tcd);
}
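
/*
 * Allocate a software descriptor plus sg_len hardware TCDs from the
 * channel's dma_pool; on a partial allocation failure, all TCDs obtained
 * so far are returned to the pool.
 */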
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * Choose a suitable burst length if sg_dma_len is not a
		 * multiple of the burst length, so that the whole transfer
		 * length is a multiple of the minor loop (burst length).
		 */
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set burst size as 1 if there's no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
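
/*
 * memcpy is done in software-triggered mode (is_sw): a single TCD with a
 * 32-byte access size covers the whole copy, matching the copy_align and
 * max_seg_size constraints so one TCD is always enough.
 */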
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
		fsl_chan->is_remote = true;

	/* To match with copy_align and max_seg_size so 1 tcd is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}
void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	int ret;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_prepare_enable(fsl_chan->clk);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
				32, 0);

	if (fsl_chan->txirq) {
		ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
				  fsl_chan->chan_name, fsl_chan);

		if (ret) {
			dma_pool_destroy(fsl_chan->tcd_pool);
			return ret;
		}
	}

	return 0;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	if (fsl_chan->txirq)
		free_irq(fsl_chan->txirq, fsl_chan);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
	fsl_chan->srcid = 0;
	fsl_chan->is_remote = false;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_disable_unprepare(fsl_chan->clk);
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from the ColdFire mcf5441x 64-channel eDMA.
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");