/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

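/*
 * A minimal sketch (assumed consumer code, not part of this driver) of how a
 * video client obtains the two channels described above through the dmaengine
 * slave API.  The "vdma0"/"vdma1" names are placeholders taken from the
 * client's dma-names property:
 *
 *	struct dma_chan *tx_chan, *rx_chan;
 *
 *	tx_chan = dma_request_slave_channel(dev, "vdma0");	// MM2S
 *	rx_chan = dma_request_slave_channel(dev, "vdma1");	// S2MM
 *
 * Transfers are then built with dmaengine_prep_interleaved_dma() and kicked
 * off with dma_async_issue_pending(); see the sketch after
 * xilinx_vdma_dma_prep_interleaved() below.
 */
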
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_VDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_VDMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_VDMA_REG_DMACR			0x0000
#define XILINX_VDMA_DMACR_DELAY_MAX		0xff
#define XILINX_VDMA_DMACR_DELAY_SHIFT		24
#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_VDMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_VDMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_VDMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_VDMA_DMACR_MASTER_SHIFT		8
#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT	5
#define XILINX_VDMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_VDMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_VDMA_DMACR_RESET			BIT(2)
#define XILINX_VDMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_VDMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_VDMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_VDMA_REG_DMASR			0x0004
#define XILINX_VDMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_VDMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_VDMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_VDMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_VDMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_VDMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_VDMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_VDMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_VDMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_VDMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_VDMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_VDMA_DMASR_IDLE			BIT(1)
#define XILINX_VDMA_DMASR_HALTED		BIT(0)
#define XILINX_VDMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_VDMA_REG_CURDESC			0x0008
#define XILINX_VDMA_REG_TAILDESC		0x0010
#define XILINX_VDMA_REG_REG_INDEX		0x0014
#define XILINX_VDMA_REG_FRMSTORE		0x0018
#define XILINX_VDMA_REG_THRESHOLD		0x001c
#define XILINX_VDMA_REG_FRMPTR_STS		0x0024
#define XILINX_VDMA_REG_PARK_PTR		0x0028
#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_VDMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_VDMA_REG_VSIZE			0x0000
#define XILINX_VDMA_REG_HSIZE			0x0004

#define XILINX_VDMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))

/* HW specific definitions */
#define XILINX_VDMA_MAX_CHANS_PER_DEVICE	0x2

#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_VDMA_DMASR_ERR_IRQ)

#define XILINX_VDMA_DMASR_ALL_ERR_MASK	\
		(XILINX_VDMA_DMASR_EOL_LATE_ERR | \
		 XILINX_VDMA_DMASR_SOF_LATE_ERR | \
		 XILINX_VDMA_DMASR_SG_DEC_ERR | \
		 XILINX_VDMA_DMASR_SG_SLV_ERR | \
		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_DMA_DEC_ERR | \
		 XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_VDMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_VDMA_DMASR_SOF_LATE_ERR | \
		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_VDMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_VDMA_FLUSH_S2MM		3
#define XILINX_VDMA_FLUSH_MM2S		2
#define XILINX_VDMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_VDMA_LOOP_COUNT		1000000

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @pad2: Reserved @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 pad2;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

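/*
 * Worked example (illustrative numbers only): for a 1280x720 frame of 3-byte
 * pixels stored with a 4096-byte line pitch, hsize is the number of bytes
 * transferred per line (1280 * 3 = 3840), vsize is the number of lines (720)
 * and stride is the distance in bytes between the first pixels of two
 * consecutive lines (4096).
 */
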
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 */
struct xilinx_vdma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
};

/**
 * struct xilinx_vdma_chan - Driver specific VDMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_desc: Active descriptor
 * @allocated_desc: Allocated descriptor
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 */
struct xilinx_vdma_chan {
	struct xilinx_vdma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct xilinx_vdma_tx_descriptor *active_desc;
	struct xilinx_vdma_tx_descriptor *allocated_desc;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
};

/**
 * struct xilinx_vdma_device - VDMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific VDMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync
 */
struct xilinx_vdma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	u32 flush_on_fsync;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_vdma_chan, common)
#define to_vdma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)

/* IO accessors */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
{
	return vdma_read(chan, chan->ctrl_offset + reg);
}

static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
				   u32 value)
{
	vdma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 clr)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
}

static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
				 u32 set)
{
	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	memset(segment, 0, sizeof(*segment));
	segment->phys = phys;

	return segment;
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific VDMA channel
 * @segment: VDMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific VDMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;

	if (chan->allocated_desc)
		return chan->allocated_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	spin_lock_irqsave(&chan->lock, flags);
	chan->allocated_desc = desc;
	spin_unlock_irqrestore(&chan->lock, flags);

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific VDMA channel
 * @desc: VDMA transaction descriptor
 */
static void
xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
			       struct xilinx_vdma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;

	if (!desc)
		return;

	list_for_each_entry_safe(segment, next, &desc->segments, node) {
		list_del(&segment->node);
		xilinx_vdma_free_tx_segment(chan, segment);
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_vdma_free_desc_list - Free descriptors list
 * @chan: Driver specific VDMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
				       struct list_head *list)
{
	struct xilinx_vdma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_vdma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_vdma_free_descriptors - Free channel descriptors
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
	xilinx_vdma_free_desc_list(chan, &chan->done_list);

	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
	chan->active_desc = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_vdma_free_descriptors(chan);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/**
 * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_vdma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx VDMA channel structure
 */
static void xilinx_vdma_do_tasklet(unsigned long data)
{
	struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;

	xilinx_vdma_chan_desc_cleanup(chan);
}

/**
 * xilinx_vdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting the Xilinx VDMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				chan->dev,
				sizeof(struct xilinx_vdma_tx_segment),
				__alignof__(struct xilinx_vdma_tx_segment), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	return 0;
}

/**
 * xilinx_vdma_tx_status - Get VDMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xilinx_vdma_is_running - Check if VDMA channel is running
 * @chan: Driver specific VDMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
{
	return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		 XILINX_VDMA_DMASR_HALTED) &&
	       (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		XILINX_VDMA_DMACR_RUNSTOP);
}

/**
 * xilinx_vdma_is_idle - Check if VDMA channel is idle
 * @chan: Driver specific VDMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
{
	return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
	       XILINX_VDMA_DMASR_IDLE;
}

/**
 * xilinx_vdma_halt - Halt VDMA channel
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;

	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	do {
		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		    XILINX_VDMA_DMASR_HALTED)
			break;
	} while (loop--);

	if (!loop) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_vdma_start - Start VDMA channel
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;

	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	do {
		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
		      XILINX_VDMA_DMASR_HALTED))
			break;
	} while (loop--);

	if (!loop) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;
	u32 reg;
	struct xilinx_vdma_tx_segment *head, *tail = NULL;

	if (chan->err)
		return;

	spin_lock_irqsave(&chan->lock, flags);

	/* There's already an active descriptor, bail out. */
	if (chan->active_desc)
		goto out_unlock;

	if (list_empty(&chan->pending_list))
		goto out_unlock;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_vdma_tx_descriptor, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
	    !xilinx_vdma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg) {
		head = list_first_entry(&desc->segments,
					struct xilinx_vdma_tx_segment, node);
		tail = list_entry(desc->segments.prev,
				  struct xilinx_vdma_tx_segment, node);

		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
	}

	/* Configure the hardware using info in the config structure */
	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode.
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_VDMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_VDMA_DMACR_CIRC_EN;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
		else
			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
				   config->park_frm <<
				   XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_vdma_start(chan);

	if (chan->err)
		goto out_unlock;

	/* Start the transfer */
	if (chan->has_sg) {
		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		list_for_each_entry(segment, &desc->segments, node) {
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);
			last = segment;
		}

		if (!last)
			goto out_unlock;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
	}

	list_del(&desc->node);
	chan->active_desc = desc;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	xilinx_vdma_start_transfer(chan);
}

/**
 * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 */
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
	struct xilinx_vdma_tx_descriptor *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->active_desc;
	if (!desc) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);

	chan->active_desc = NULL;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_vdma_reset - Reset VDMA channel
 * @chan: Driver specific VDMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
	int loop = XILINX_VDMA_LOOP_COUNT;
	u32 tmp;

	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);

	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
	      XILINX_VDMA_DMACR_RESET;

	/* Wait for the hardware to finish reset */
	do {
		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
		      XILINX_VDMA_DMACR_RESET;
	} while (loop-- && tmp);

	if (!loop) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;

	return 0;
}

/**
 * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
 * @chan: Driver specific VDMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_vdma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_vdma_irq_handler - VDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx VDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
{
	struct xilinx_vdma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
	if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
			status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * so make sure not to set other error bits to 1.
		 */
		u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;

		vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
				errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
				vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
		xilinx_vdma_complete_descriptor(chan);
		xilinx_vdma_start_transfer(chan);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * xilinx_vdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
	struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional.
		 */
		err = xilinx_vdma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Append the transaction to the pending transactions queue. */
	list_add_tail(&desc->node, &chan->pending_list);

	/* Free the allocated desc */
	chan->allocated_desc = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_vdma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_vdma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	/* Link the previous next descriptor to current */
	if (!list_empty(&desc->segments)) {
		prev = list_last_entry(&desc->segments,
				       struct xilinx_vdma_tx_segment, node);
		prev->hw.next_desc = segment->phys;
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	prev = segment;

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	prev->hw.next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_vdma_free_tx_descriptor(chan, desc);
	return NULL;
}

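/*
 * A minimal client-side sketch (assumed consumer code, not part of this
 * driver) of preparing and submitting one frame with the interleaved API.
 * The chan pointer and the buf_phys, width, height, bpp and pitch values are
 * placeholders:
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_phys;		// frame buffer DMA address
 *	xt->numf = height;			// lines per frame -> vsize
 *	xt->frame_size = 1;			// this driver accepts one chunk
 *	xt->sgl[0].size = width * bpp;		// bytes per line -> hsize
 *	xt->sgl[0].icg = pitch - width * bpp;	// gap up to the line pitch
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *	kfree(xt);
 */
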
/**
 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific VDMA channel pointer
 *
 * Return: Always '0'
 */
static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);

	/* Halt the DMA engine */
	xilinx_vdma_halt(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_vdma_free_descriptors(chan);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_vdma_chan_reset(chan);

	dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;

	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);

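/*
 * A minimal sketch of the run-time configuration call above, as a consumer
 * might issue it before starting transfers (the field values are purely
 * illustrative):
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,		// interrupt after every frame
 *		.park = 0,		// circular mode, cycle through fstores
 *	};
 *
 *	xilinx_vdma_channel_set_config(tx_chan, &cfg);
 */
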
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_vdma_chan_remove - Per Channel remove function
 * @chan: Driver specific VDMA channel
 */
static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
{
	/* Disable all interrupts */
	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

/**
 * xilinx_vdma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
				  struct device_node *node)
{
	struct xilinx_vdma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = 0;

		chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
			chan->flush_on_fsync = true;
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = 1;

		chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
		chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
			chan->flush_on_fsync = true;
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
			  "xilinx-vdma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_vdma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

/**
 * xilinx_vdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_vdma_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_vdma_device *xdev;
	struct device_node *child;
	struct resource *io;
	u32 num_frames;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");

	err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
	if (err < 0) {
		dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
		return err;
	}

	err = of_property_read_u32(node, "xlnx,flush-fsync",
				   &xdev->flush_on_fsync);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);

	xdev->common.device_alloc_chan_resources =
				xilinx_vdma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_vdma_free_chan_resources;
	xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
	xdev->common.device_tx_status = xilinx_vdma_tx_status;
	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_vdma_chan_probe(xdev, child);
		if (err < 0)
			goto error;
	}

	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xdev->chan[i]->num_frms = num_frames;

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_vdma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_vdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_vdma_remove(struct platform_device *pdev)
{
	struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
		if (xdev->chan[i])
			xilinx_vdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_vdma_of_ids[] = {
	{ .compatible = "xlnx,axi-vdma-1.00.a",},
	{}
};

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_vdma_of_ids,
	},
	.probe = xilinx_vdma_probe,
	.remove = xilinx_vdma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");