// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma/xilinx_dpdma.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
#define XILINX_DPDMA_ERR_CTRL				0x000
#define XILINX_DPDMA_ISR				0x004
#define XILINX_DPDMA_IMR				0x008
#define XILINX_DPDMA_IEN				0x00c
#define XILINX_DPDMA_IDS				0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
#define XILINX_DPDMA_INTR_ALL				0x0fffffff
#define XILINX_DPDMA_EISR				0x014
#define XILINX_DPDMA_EIMR				0x018
#define XILINX_DPDMA_EIEN				0x01c
#define XILINX_DPDMA_EIDS				0x020
#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
#define XILINX_DPDMA_EINTR_ALL				0xffffffff
#define XILINX_DPDMA_CNTL				0x100
#define XILINX_DPDMA_GBL				0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL				0x108
#define XILINX_DPDMA_ALC0_STATUS			0x10c
#define XILINX_DPDMA_ALC0_MAX				0x110
#define XILINX_DPDMA_ALC0_MIN				0x114
#define XILINX_DPDMA_ALC0_ACC				0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
#define XILINX_DPDMA_ALC1_CNTL				0x120
#define XILINX_DPDMA_ALC1_STATUS			0x124
#define XILINX_DPDMA_ALC1_MAX				0x128
#define XILINX_DPDMA_ALC1_MIN				0x12c
#define XILINX_DPDMA_ALC1_ACC				0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE				0x200
#define XILINX_DPDMA_CH_OFFSET				0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
#define XILINX_DPDMA_CH_CNTL				0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
#define XILINX_DPDMA_CH_STATUS				0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO				0x020
#define XILINX_DPDMA_CH_PYLD_SZ				0x024
#define XILINX_DPDMA_CH_DESC_ID				0x028
#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES			256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128

#define XILINX_DPDMA_NUM_CHAN				6
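
/*
 * Note (derived from the dt-bindings header and the code below): the six DPDMA
 * channels are fixed-function as used by the DisplayPort subsystem. Channels 0
 * to 2 carry the video plane (one channel per buffer of a planar format, hence
 * the video group handling further down), channel 3 carries the graphics plane
 * and channels 4 and 5 the two audio streams, matching the indices in
 * dt-bindings/dma/xlnx-zynqmp-dpdma.h.
 */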
struct xilinx_dpdma_chan;
/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);
/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};
/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DPDMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)
/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
 *        @vchan.lock, if both are to be held.
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: references to descriptors being processed
 * @desc.pending: descriptor scheduled to the hardware, pending execution
 * @desc.active: descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};
/* -----------------------------------------------------------------------------
 * DebugFS
 */

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
	DPDMA_TC_INTR_DONE,
	DPDMA_TC_NONE,
};

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_irq_done_count;
	unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	const char *name;
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read)(char *buf);
	int (*write)(char *args);
};
static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}
static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
	size_t out_str_len;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
			    out_str_len);
	snprintf(buf, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_irq_done_count);

	return 0;
}
static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
	char *arg;
	int ret;
	u32 id;

	arg = strsep(&args, " ");
	if (!arg || strncasecmp(arg, "start", 5))
		return -EINVAL;

	arg = strsep(&args, " ");
	if (!arg)
		return -EINVAL;

	ret = kstrtou32(arg, 0, &id);
	if (ret < 0)
		return ret;

	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
		return -EINVAL;

	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
	dpdma_debugfs.chan_id = id;

	return 0;
}
/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{
		.name = "DESCRIPTOR_DONE_INTR",
		.tc = DPDMA_TC_INTR_DONE,
		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
	},
};
static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	enum xilinx_dpdma_testcases testcase;
	char *kern_buff;
	int ret = 0;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
	if (!kern_buff) {
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
		return -ENOMEM;
	}

	testcase = READ_ONCE(dpdma_debugfs.testcase);
	if (testcase != DPDMA_TC_NONE) {
		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
		if (ret < 0)
			goto done;
	} else {
		strscpy(kern_buff, "No testcase executed",
			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
	}

	size = min(size, strlen(kern_buff));
	if (copy_to_user(buf, kern_buff, size))
		ret = -EFAULT;

done:
	kfree(kern_buff);
	if (ret)
		return ret;

	*pos = size + 1;
	return size;
}
static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
					  const char __user *buf, size_t size,
					  loff_t *pos)
{
	char *kern_buff, *kern_buff_start;
	char *testcase;
	unsigned int i;
	int ret;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	/* Supporting single instance of test as of now. */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
		return -EBUSY;

	kern_buff = kzalloc(size, GFP_KERNEL);
	if (!kern_buff)
		return -ENOMEM;
	kern_buff_start = kern_buff;

	ret = strncpy_from_user(kern_buff, buf, size);
	if (ret < 0)
		goto done;

	/* Read the testcase name from a user request. */
	testcase = strsep(&kern_buff, " ");

	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
			break;
	}

	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
		ret = -EINVAL;
		goto done;
	}

	ret = dpdma_debugfs_reqs[i].write(kern_buff);
	if (ret < 0)
		goto done;

	ret = size;

done:
	kfree(kern_buff_start);
	return ret;
}
static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
};
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
	struct dentry *dent;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
				   NULL, &fops_xilinx_dpdma_dbgfs);
	if (IS_ERR(dent))
		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}
/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}
/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}
/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}
/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}
/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor.
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}
/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}
/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}
/**
 * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
 * @chan: DPDMA channel
 * @buf_addr: buffer address
 * @buf_len: buffer length
 * @period_len: length of one period in bytes
 * @flags: tx flags argument passed in to prepare function
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * for the given cyclic transaction.
 *
 * Return: A dma async tx descriptor on success, or NULL.
 */
static struct dma_async_tx_descriptor *
xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
			      dma_addr_t buf_addr, size_t buf_len,
			      size_t period_len, unsigned long flags)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		struct xilinx_dpdma_hw_desc *hw_desc;

		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
			dev_err(chan->xdev->dev,
				"buffer should be aligned at %d B\n",
				XILINX_DPDMA_ALIGN_BYTES);
			goto error;
		}

		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
		if (!sw_desc)
			goto error;

		xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
						   &buf_addr, 1);

		hw_desc = &sw_desc->hw;
		hw_desc->xfer_size = period_len;
		hw_desc->hsize_stride =
			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
				   period_len) |
			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
				   period_len);
		hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
				   XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
				   XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;

		list_add_tail(&sw_desc->node, &tx_desc->descriptors);

		buf_addr += period_len;
		last = sw_desc;
	}

	sw_desc = list_first_entry(&tx_desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (chan->xdev->ext_addr)
		last->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));

	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);

error:
	xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);

	return NULL;
}
/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev,
			"chan%u: buffer should be aligned at %d B\n",
			chan->id, XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}
/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}
/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}
/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}
static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}
/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;
	bool first_frame;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bit
	 * will be used, but it should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
				    & XILINX_DPDMA_CH_DESC_ID_MASK;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	first_frame = chan->first_frame;
	chan->first_frame = false;

	if (chan->video_group) {
		channels = xilinx_dpdma_chan_video_group_ready(chan);
		/*
		 * Trigger the transfer only when all channels in the group are
		 * ready.
		 */
		if (!channels)
			return;
	} else {
		channels = BIT(chan->id);
	}

	if (first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}
/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}
/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for no outstanding event, so waiters can stop the channel
 * safely. This function is supposed to be called when 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and
 * should be re-enabled when this event is handled. If the channel status
 * register still shows some number of outstanding transactions, the interrupt
 * remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev,
			"chan%u: %d outstanding transactions\n",
			chan->id, cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}
/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for 50ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT for time out, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for a no outstanding transaction interrupt upto 50msec */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}
/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in the interrupt context
 * or where atomicity is required. The calling thread may wait more than 50ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}
/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a new
 * descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active;

	spin_lock(&chan->lock);

	xilinx_dpdma_debugfs_desc_done_irq(chan);

	active = chan->desc.active;
	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "chan%u: DONE IRQ with no active descriptor!\n",
			 chan->id);

	spin_unlock(&chan->lock);
}
/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	u32 desc_id;

	spin_lock(&chan->lock);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
		& XILINX_DPDMA_CH_DESC_ID_MASK;

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id) {
		dev_dbg(chan->xdev->dev,
			"chan%u: vsync race lost (%u != %u), retrying\n",
			chan->id, sw_desc->hw.desc_id, desc_id);
		goto out;
	}

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	spin_lock(&chan->vchan.lock);
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);

out:
	spin_unlock(&chan->lock);
}
/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}
/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel that was paused by the error and determines whether
 * the current active descriptor can be rescheduled, depending on the
 * descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
			chan->id);

	/* Reschedule if there's no new descriptor */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
			     size_t buf_len, size_t period_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	if (direction != DMA_MEM_TO_DEV)
		return NULL;

	if (buf_len % period_len)
		return NULL;

	return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
					     period_len, flags);
}
*
1289 xilinx_dpdma_prep_interleaved_dma(struct dma_chan
*dchan
,
1290 struct dma_interleaved_template
*xt
,
1291 unsigned long flags
)
1293 struct xilinx_dpdma_chan
*chan
= to_xilinx_chan(dchan
);
1294 struct xilinx_dpdma_tx_desc
*desc
;
1296 if (xt
->dir
!= DMA_MEM_TO_DEV
)
1299 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1302 if (!(flags
& DMA_PREP_REPEAT
) || !(flags
& DMA_PREP_LOAD_EOT
))
1305 desc
= xilinx_dpdma_chan_prep_interleaved_dma(chan
, xt
);
1309 vchan_tx_prep(&chan
->vchan
, &desc
->vdesc
, flags
| DMA_CTRL_ACK
);
1311 return &desc
->vdesc
.tx
;
/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"chan%u: failed to allocate a descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	return 0;
}
/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_peripheral_config *pconfig;
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	/*
	 * Use the peripheral_config to indicate that the channel is part
	 * of a video group. This requires matching use of the custom
	 * structure in each driver.
	 */
	pconfig = config->peripheral_config;
	if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
		return -EINVAL;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
		chan->video_group = pconfig->video_group;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}
/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors are
 * not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}
/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}
/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}
/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle if any error occurs based on @isr and @eisr. This function disables
 * the corresponding error interrupts, and those should be re-enabled once
 * handling is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}
/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}
/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * it re-enables the channel error interrupts and restarts the channel if
 * needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}
/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
				  unsigned int chan_id)
{
	struct xilinx_dpdma_chan *chan;

	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->id = chan_id;
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
		  + XILINX_DPDMA_CH_OFFSET * chan->id;
	chan->running = false;
	chan->xdev = xdev;

	spin_lock_init(&chan->lock);
	init_waitqueue_head(&chan->wait_to_stop);

	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
	vchan_init(&chan->vchan, &xdev->common);

	xdev->chan[chan->id] = chan;

	return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
	if (!chan)
		return;

	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
}
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= ARRAY_SIZE(xdev->chan))
		return NULL;

	if (!xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
{
	unsigned int i;
	void __iomem *reg;

	/* Disable all interrupts */
	xilinx_dpdma_disable_irq(xdev);

	/* Stop all channels */
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
		reg = xdev->reg + XILINX_DPDMA_CH_BASE
		    + XILINX_DPDMA_CH_OFFSET * i;
		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
	}

	/* Clear the interrupt status registers */
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
}
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev;
	struct dma_device *ddev;
	unsigned int i;
	int ret;

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	xdev->ext_addr = sizeof(dma_addr_t) > 4;

	INIT_LIST_HEAD(&xdev->common.channels);

	platform_set_drvdata(pdev, xdev);

	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);

	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);

	dpdma_hw_init(xdev);

	xdev->irq = platform_get_irq(pdev, 0);
	if (xdev->irq < 0)
		return xdev->irq;

	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
	if (ret) {
		dev_err(xdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ddev = &xdev->common;
	ddev->dev = &pdev->dev;

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->device_synchronize = xilinx_dpdma_synchronize;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
		ret = xilinx_dpdma_chan_init(xdev, i);
		if (ret < 0) {
			dev_err(xdev->dev, "failed to initialize channel %u\n",
				i);
			goto error;
		}
	}

	ret = clk_prepare_enable(xdev->axi_clk);
	if (ret) {
		dev_err(xdev->dev, "failed to enable the axi clock\n");
		goto error;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error_dma_async;
	}

	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	xilinx_dpdma_enable_irq(xdev);

	xilinx_dpdma_debugfs_init(xdev);

	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
error:
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	free_irq(xdev->irq, xdev);

	return ret;
}
static void xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	/* Start by disabling the IRQ to avoid races during cleanup. */
	free_irq(xdev->irq, xdev);

	xilinx_dpdma_disable_irq(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);
}
static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma",},
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
	.driver			= {
		.name		= "xilinx-zynqmp-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,
	},
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");