/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
 */

#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_

#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include "virt-dma.h"

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))

#define EDMA_TCD_ATTR_DSIZE(x)	((x) & GENMASK(2, 0))
#define EDMA_TCD_ATTR_DMOD(x)	(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)	(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)	(((x) & GENMASK(4, 0)) << 11)
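
/*
 * Illustrative sketch (not a helper defined by this driver): ATTR packs the
 * source/destination transfer sizes and address-modulo fields. Assuming the
 * usual eDMA size encoding of log2(width in bytes), a 32-bit transfer with
 * no modulo wrapping would be composed as:
 *
 *	attr = EDMA_TCD_ATTR_SSIZE(2) | EDMA_TCD_ATTR_DSIZE(2);
 */
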
#define EDMA_TCD_ITER_MASK	GENMASK(14, 0)
#define EDMA_TCD_CITER_CITER(x)	((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_BITER_BITER(x)	((x) & EDMA_TCD_ITER_MASK)

#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)

#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x)	((x) & GENMASK(9, 0))
#define EDMA_V3_TCD_NBYTES_MLOFF(x)		((x) << 10)
#define EDMA_V3_TCD_NBYTES_DMLOE		BIT(30)
#define EDMA_V3_TCD_NBYTES_SMLOE		BIT(31)

#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

#define DMAMUX_NR	2

#define EDMA_TCD	0x1000

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

#define EDMA_V3_CH_SBR_RD	BIT(22)
#define EDMA_V3_CH_SBR_WR	BIT(21)
#define EDMA_V3_CH_CSR_ERQ	BIT(0)
#define EDMA_V3_CH_CSR_EARQ	BIT(1)
#define EDMA_V3_CH_CSR_EEI	BIT(2)
#define EDMA_V3_CH_CSR_DONE	BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE	BIT(31)

enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};

struct fsl_edma_hw_tcd {
	__le32	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le32	slast;
	__le32	daddr;
	__le16	doff;
	__le16	citer;
	__le32	dlast_sga;
	__le16	csr;
	__le16	biter;
};

struct fsl_edma_hw_tcd64 {
	__le64	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le64	slast;
	__le64	daddr;
	__le64	dlast_sga;
	__le16	doff;
	__le16	citer;
	__le16	csr;
	__le16	biter;
} __packed;

struct fsl_edma3_ch_reg {
	__le32	ch_csr;
	__le32	ch_es;
	__le32	ch_int;
	__le32	ch_sbr;
	__le32	ch_pri;
	__le32	ch_mux;
	__le32	ch_mattr;	/* edma4, reserved for edma3 */
	__le32	ch_reserved;
	union {
		struct fsl_edma_hw_tcd		tcd;
		struct fsl_edma_hw_tcd64	tcd64;
	};
};

/*
 * These are iomem pointers, for both v32 and v64.
 */
struct edma_regs {
	void __iomem *cr;
	void __iomem *es;
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;
	void __iomem *ceei;
	void __iomem *serq;
	void __iomem *cerq;
	void __iomem *cint;
	void __iomem *cerr;
	void __iomem *ssrt;
	void __iomem *cdne;
	void __iomem *intl;
	void __iomem *inth;
	void __iomem *errl;
	void __iomem *errh;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	void				*vtcd;
};

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct dma_slave_config		cfg;
	u32				attr;
	bool				is_sw;
	struct dma_pool			*tcd_pool;
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	void __iomem			*tcd;
	void __iomem			*mux_addr;
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;
	u32				srcid;
	struct clk			*clk;
	int				priority;
	int				hw_chanid;
	int				txirq;
	irqreturn_t			(*irq_handler)(int irq, void *dev_id);
	bool				is_rxchan;
	bool				is_remote;
	bool				is_multi_fifo;
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

#define FSL_EDMA_DRV_HAS_DMACLK		BIT(0)
#define FSL_EDMA_DRV_MUX_SWAP		BIT(1)
#define FSL_EDMA_DRV_CONFIG32		BIT(2)
#define FSL_EDMA_DRV_WRAP_IO		BIT(3)
#define FSL_EDMA_DRV_EDMA64		BIT(4)
#define FSL_EDMA_DRV_HAS_PD		BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK		BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX		BIT(7)
#define FSL_EDMA_DRV_MEM_REMOTE		BIT(8)
/* The control and status registers live in the TCD address space (eDMA3 register layout) */
#define FSL_EDMA_DRV_SPLIT_REG		BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE		BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV		BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE	BIT(12)
/* CHn_CSR[DONE] must be cleared before enabling the TCD's E_SG bit */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG	BIT(13)
/* CHn_CSR[DONE] must be cleared before enabling the TCD's MAJORELINK bit */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK	BIT(14)
#define FSL_EDMA_DRV_TCD64		BIT(15)

#define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_SG |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

#define FSL_EDMA_DRV_EDMA4	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

struct fsl_edma_drvdata {
	u32			dmamuxs;	/* only used before v3 */
	u32			chreg_off;
	u32			chreg_space_sz;
	u32			flags;
	u32			mux_off;	/* channel mux register offset */
	u32			mux_skip;	/* address stride between consecutive channels' mux registers */
	int			(*setup_irq)(struct platform_device *pdev,
					     struct fsl_edma_engine *fsl_edma);
};

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;
	bool			big_endian;
	u32			n_chans;
	struct edma_regs	regs;
	struct fsl_edma_chan	chans[] __counted_by(n_chans);
};

static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
	return fsl_chan->edma->drvdata->flags;
}
#define edma_read_tcdreg_c(chan, _tcd, __name)				\
_Generic(((_tcd)->__name),						\
	__iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name),	\
	__iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name),	\
	__iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name)	\
	)

#define edma_read_tcdreg(chan, __name)					\
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ?			\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) :	\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name)	\
)
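
/*
 * Example of the dispatch above (illustrative): on an FSL_EDMA_DRV_TCD64
 * device "saddr" is __le64, so
 *
 *	edma_read_tcdreg(chan, saddr)
 *
 * resolves to edma_readq(); with the 32-bit TCD the same expression
 * resolves to edma_readl(), because _Generic selects on the type of the
 * named TCD member.
 */
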
#define edma_write_tcdreg_c(chan, _tcd, _val, __name)			\
_Generic(((_tcd)->__name),						\
	__iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &(_tcd)->__name),	\
	__iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &(_tcd)->__name),	\
	__iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &(_tcd)->__name),	\
	__iomem u8 : edma_writeb(chan->edma, _val, &(_tcd)->__name)	\
	)

#define edma_write_tcdreg(chan, val, __name)				\
do {									\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;		\
									\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
		edma_write_tcdreg_c(chan, tcd64_r, val, __name);	\
	else								\
		edma_write_tcdreg_c(chan, tcd_r, val, __name);		\
} while (0)

#define edma_cp_tcd_to_reg(chan, __tcd, __name)				\
do {									\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;		\
	struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd;	\
	struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd;	\
									\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
		edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name);	\
	else								\
		edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name);	\
} while (0)

#define edma_readl_chreg(chan, __name)					\
	edma_readl(chan->edma,						\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),	\
						  struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val, __name)				\
	edma_writel(chan->edma, val,					\
		    (void __iomem *)&(container_of(((__force void *)chan->tcd),	\
						   struct fsl_edma3_ch_reg, tcd)->__name))

#define fsl_edma_get_tcd(_chan, _tcd, _field)				\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?			\
	(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) :			\
	(((struct fsl_edma_hw_tcd *)_tcd)->_field))

#define fsl_edma_le_to_cpu(x)						\
_Generic((x),								\
	__le64 : le64_to_cpu((x)),					\
	__le32 : le32_to_cpu((x)),					\
	__le16 : le16_to_cpu((x))					\
)

#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field)			\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?			\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) :	\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
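
/*
 * Illustrative use (hypothetical snippet, not a declared helper): fetch
 * BITER from an in-memory TCD in CPU byte order, independent of TCD width:
 *
 *	u16 biter = fsl_edma_get_tcd_to_cpu(fsl_chan, vtcd, biter) &
 *		    EDMA_TCD_ITER_MASK;
 */
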
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field)			\
_Generic(((_tcd)->_field),						\
	__le64 : (_tcd)->_field = cpu_to_le64(_val),			\
	__le32 : (_tcd)->_field = cpu_to_le32(_val),			\
	__le16 : (_tcd)->_field = cpu_to_le16(_val)			\
)

#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field)		\
do {									\
	if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64)		\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field);	\
	else								\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field);	\
} while (0)
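
/*
 * Write-side counterpart, as a sketch (hypothetical snippet): store a
 * CPU-order value into an in-memory TCD, with the endianness conversion
 * chosen by the field's type:
 *
 *	fsl_edma_set_tcd_to_le(fsl_chan, vtcd, EDMA_TCD_CSR_INT_MAJOR, csr);
 */
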
/* Must come after the structure definitions above */
#include "fsl-edma-trace.h"

/*
 * R/W functions for big- or little-endian registers:
 * The eDMA controller's endianness is independent of the CPU core's
 * endianness. For the big-endian IP module, the offsets of 8-bit and
 * 16-bit registers are also swapped relative to the little-endian layout.
 */
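
/*
 * Worked example of the offset swap (assuming a big-endian eDMA block on a
 * 32-bit bus and a 4-byte-aligned base): a 16-bit write aimed at byte
 * offset 0x2 of a register word must land at offset 0x0 instead, which is
 * what the address XOR in edma_writew() below achieves:
 *
 *	edma_writew(edma, val, base + 0x2);
 *	 -> iowrite16be(val, (base + 0x2) ^ 0x2), i.e. iowrite16be(val, base)
 */
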
static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
{
	u64 l, h;

	if (edma->big_endian) {
		l = ioread32be(addr);
		h = ioread32be(addr + 4);
	} else {
		l = ioread32(addr);
		h = ioread32(addr + 4);
	}

	trace_edma_readl(edma, addr, l);
	trace_edma_readl(edma, addr + 4, h);

	return (h << 32) | l;
}

static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	u32 val;

	if (edma->big_endian)
		val = ioread32be(addr);
	else
		val = ioread32(addr);

	trace_edma_readl(edma, addr, val);

	return val;
}

static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
	u16 val;

	if (edma->big_endian)
		val = ioread16be(addr);
	else
		val = ioread16(addr);

	trace_edma_readw(edma, addr, val);

	return val;
}

static inline void edma_writeb(struct fsl_edma_engine *edma,
			       u8 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
	else
		iowrite8(val, addr);

	trace_edma_writeb(edma, addr, val);
}

static inline void edma_writew(struct fsl_edma_engine *edma,
			       u16 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
	else
		iowrite16(val, addr);

	trace_edma_writew(edma, addr, val);
}

static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);

	trace_edma_writel(edma, addr, val);
}

static inline void edma_writeq(struct fsl_edma_engine *edma,
			       u64 val, void __iomem *addr)
{
	if (edma->big_endian) {
		iowrite32be(val & 0xFFFFFFFF, addr);
		iowrite32be(val >> 32, addr + 4);
	} else {
		iowrite32(val & 0xFFFFFFFF, addr);
		iowrite32(val >> 32, addr + 4);
	}

	trace_edma_writel(edma, addr, val & 0xFFFFFFFF);
	trace_edma_writel(edma, addr + 4, val >> 32);
}

static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
}

void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
		size_t len, unsigned long flags);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);

#endif /* _FSL_EDMA_COMMON_H_ */