/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x - 1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x - 1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
        u32 next_desc;
        u32 pad1;
        u32 buf_addr;
        u32 buf_addr_msb;
        u32 vsize;
        u32 hsize;
        u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
        u32 next_desc;
        u32 next_desc_msb;
        u32 buf_addr;
        u32 buf_addr_msb;
        u32 mcdma_control;
        u32 vsize_stride;
        u32 control;
        u32 status;
        u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
        u32 next_desc;
        u32 next_desc_msb;
        u32 src_addr;
        u32 src_addr_msb;
        u32 dest_addr;
        u32 dest_addr_msb;
        u32 control;
        u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
        struct xilinx_vdma_desc_hw hw;
        struct list_head node;
        dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
        struct xilinx_axidma_desc_hw hw;
        struct list_head node;
        dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
        struct xilinx_cdma_desc_hw hw;
        struct list_head node;
        dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
        struct dma_async_tx_descriptor async_tx;
        struct list_head segments;
        struct list_head node;
        bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel id
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
        struct xilinx_dma_device *xdev;
        u32 ctrl_offset;
        u32 desc_offset;
        spinlock_t lock;
        struct list_head pending_list;
        struct list_head active_list;
        struct list_head done_list;
        struct dma_chan common;
        struct dma_pool *desc_pool;
        struct device *dev;
        int irq;
        int id;
        enum dma_transfer_direction direction;
        int num_frms;
        bool has_sg;
        bool cyclic;
        bool genlock;
        bool err;
        struct tasklet_struct tasklet;
        struct xilinx_vdma_config config;
        bool flush_on_fsync;
        u32 desc_pendingcount;
        bool ext_addr;
        u32 desc_submitcount;
        u32 residue;
        struct xilinx_axidma_tx_segment *seg_v;
        struct xilinx_axidma_tx_segment *cyclic_seg_v;
        void (*start_transfer)(struct xilinx_dma_chan *chan);
        u16 tdest;
};

/**
 * struct xilinx_dma_config - DMA Config structure
 * @dmatype: DMA IP type
 * @clk_init: DMA clock initialization routine
 */
struct xilinx_dma_config {
        enum xdma_ip_type dmatype;
        int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
                        struct clk **tx_clk, struct clk **txs_clk,
                        struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
        void __iomem *regs;
        struct device *dev;
        struct dma_device common;
        struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
        bool has_sg;
        bool mcdma;
        u32 flush_on_fsync;
        bool ext_addr;
        struct platform_device *pdev;
        const struct xilinx_dma_config *dma_config;
        struct clk *axi_clk;
        struct clk *tx_clk;
        struct clk *txs_clk;
        struct clk *rx_clk;
        struct clk *rxs_clk;
        u32 nr_channels;
        u32 chan_id;
};
/* Macros */
#define to_xilinx_chan(chan) \
        container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
        container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
        readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
                           cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
        return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
        iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
                                   u32 value)
{
        dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
        return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
                                  u32 value)
{
        dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
                                u32 clr)
{
        dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
                                u32 set)
{
        dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA start-address registers sit at offsets that are not
 * 64-bit aligned (e.g. 0x5c), the descriptor address is written as two
 * separate 32-bit writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
                                      u32 value_lsb, u32 value_msb)
{
        /* Write the lsb 32 bits */
        writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

        /* Write the msb 32 bits */
        writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
        lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
                                dma_addr_t addr)
{
        if (chan->ext_addr)
                dma_writeq(chan, reg, addr);
        else
                dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
                                     struct xilinx_axidma_desc_hw *hw,
                                     dma_addr_t buf_addr, size_t sg_used,
                                     size_t period_len)
{
        if (chan->ext_addr) {
                hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
                hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
                                                 period_len);
        } else {
                hw->buf_addr = buf_addr + sg_used + period_len;
        }
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
        struct xilinx_vdma_tx_segment *segment;
        dma_addr_t phys;

        segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
        if (!segment)
                return NULL;

        segment->phys = phys;

        return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
        struct xilinx_cdma_tx_segment *segment;
        dma_addr_t phys;

        segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
        if (!segment)
                return NULL;

        segment->phys = phys;

        return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
        struct xilinx_axidma_tx_segment *segment;
        dma_addr_t phys;

        segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
        if (!segment)
                return NULL;

        segment->phys = phys;

        return segment;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
                                       struct xilinx_axidma_tx_segment *segment)
{
        dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
                                        struct xilinx_cdma_tx_segment *segment)
{
        dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
                                        struct xilinx_vdma_tx_segment *segment)
{
        dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
        struct xilinx_dma_tx_descriptor *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return NULL;

        INIT_LIST_HEAD(&desc->segments);

        return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
                              struct xilinx_dma_tx_descriptor *desc)
{
        struct xilinx_vdma_tx_segment *segment, *next;
        struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
        struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

        if (!desc)
                return;

        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
                list_for_each_entry_safe(segment, next, &desc->segments, node) {
                        list_del(&segment->node);
                        xilinx_vdma_free_tx_segment(chan, segment);
                }
        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                list_for_each_entry_safe(cdma_segment, cdma_next,
                                         &desc->segments, node) {
                        list_del(&cdma_segment->node);
                        xilinx_cdma_free_tx_segment(chan, cdma_segment);
                }
        } else {
                list_for_each_entry_safe(axidma_segment, axidma_next,
                                         &desc->segments, node) {
                        list_del(&axidma_segment->node);
                        xilinx_dma_free_tx_segment(chan, axidma_segment);
                }
        }

        kfree(desc);
}
/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
                                      struct list_head *list)
{
        struct xilinx_dma_tx_descriptor *desc, *next;

        list_for_each_entry_safe(desc, next, list, node) {
                list_del(&desc->node);
                xilinx_dma_free_tx_descriptor(chan, desc);
        }
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        xilinx_dma_free_desc_list(chan, &chan->pending_list);
        xilinx_dma_free_desc_list(chan, &chan->done_list);
        xilinx_dma_free_desc_list(chan, &chan->active_list);

        spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

        dev_dbg(chan->dev, "Free all channel resources.\n");

        xilinx_dma_free_descriptors(chan);
        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
                xilinx_dma_free_tx_segment(chan, chan->seg_v);
        }
        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
}
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
                                          struct xilinx_dma_tx_descriptor *desc,
                                          unsigned long *flags)
{
        dma_async_tx_callback callback;
        void *callback_param;

        callback = desc->async_tx.callback;
        callback_param = desc->async_tx.callback_param;
        if (callback) {
                spin_unlock_irqrestore(&chan->lock, *flags);
                callback(callback_param);
                spin_lock_irqsave(&chan->lock, *flags);
        }
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
        struct xilinx_dma_tx_descriptor *desc, *next;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
                struct dmaengine_desc_callback cb;

                if (desc->cyclic) {
                        xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
                        break;
                }

                /* Remove from the list of running transactions */
                list_del(&desc->node);

                /* Run the link descriptor callback function */
                dmaengine_desc_get_callback(&desc->async_tx, &cb);
                if (dmaengine_desc_callback_valid(&cb)) {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        dmaengine_desc_callback_invoke(&cb, NULL);
                        spin_lock_irqsave(&chan->lock, flags);
                }

                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                xilinx_dma_free_tx_descriptor(chan, desc);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
        struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

        xilinx_dma_chan_desc_cleanup(chan);
}
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

        /* Has this channel already been allocated? */
        if (chan->desc_pool)
                return 0;

        /*
         * We need the descriptor to be aligned to 64bytes
         * for meeting Xilinx VDMA specification requirement.
         */
        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
                                chan->dev,
                                sizeof(struct xilinx_axidma_tx_segment),
                                __alignof__(struct xilinx_axidma_tx_segment),
                                0);
        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
                                chan->dev,
                                sizeof(struct xilinx_cdma_tx_segment),
                                __alignof__(struct xilinx_cdma_tx_segment),
                                0);
        } else {
                chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
                                chan->dev,
                                sizeof(struct xilinx_vdma_tx_segment),
                                __alignof__(struct xilinx_vdma_tx_segment),
                                0);
        }

        if (!chan->desc_pool) {
                dev_err(chan->dev,
                        "unable to allocate channel %d descriptor pool\n",
                        chan->id);
                return -ENOMEM;
        }

        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                /*
                 * For AXI DMA case after submitting a pending_list, keep
                 * an extra segment allocated so that the "next descriptor"
                 * pointer on the tail descriptor always points to a
                 * valid descriptor, even when paused after reaching taildesc.
                 * This way, it is possible to issue additional
                 * transfers without halting and restarting the channel.
                 */
                chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

                /*
                 * For cyclic DMA mode we need to program the tail Descriptor
                 * register with a value which is not a part of the BD chain
                 * so allocating a desc segment during channel allocation for
                 * programming tail descriptor.
                 */
                chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
        }

        dma_cookie_init(dchan);

        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                /* For AXI DMA resetting once channel will reset the
                 * other channel as well so enable the interrupts here.
                 */
                dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
                             XILINX_DMA_DMAXR_ALL_IRQ_MASK);
        }

        if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
                dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
                             XILINX_CDMA_CR_SGMODE);

        return 0;
}
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_axidma_tx_segment *segment;
        struct xilinx_axidma_desc_hw *hw;
        enum dma_status ret;
        unsigned long flags;
        u32 residue = 0;

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                spin_lock_irqsave(&chan->lock, flags);

                desc = list_last_entry(&chan->active_list,
                                       struct xilinx_dma_tx_descriptor, node);
                if (chan->has_sg) {
                        list_for_each_entry(segment, &desc->segments, node) {
                                hw = &segment->hw;
                                residue += (hw->control - hw->status) &
                                           XILINX_DMA_MAX_TRANS_LEN;
                        }
                }
                spin_unlock_irqrestore(&chan->lock, flags);

                chan->residue = residue;
                dma_set_residue(txstate, chan->residue);
        }

        return ret;
}
/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
        return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
                 XILINX_DMA_DMASR_HALTED) &&
                (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
                 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
        return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
                XILINX_DMA_DMASR_IDLE;
}

/**
 * xilinx_dma_halt - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
        int err;
        u32 val;

        dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

        /* Wait for the hardware to halt */
        err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
                                      (val & XILINX_DMA_DMASR_HALTED), 0,
                                      XILINX_DMA_LOOP_COUNT);

        if (err) {
                dev_err(chan->dev, "Cannot stop channel %p: %x\n",
                        chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
                chan->err = true;
        }
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
        int err;
        u32 val;

        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

        /* Wait for the hardware to start */
        err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
                                      !(val & XILINX_DMA_DMASR_HALTED), 0,
                                      XILINX_DMA_LOOP_COUNT);

        if (err) {
                dev_err(chan->dev, "Cannot start channel %p: %x\n",
                        chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
                chan->err = true;
        }
}
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
        struct xilinx_vdma_config *config = &chan->config;
        struct xilinx_dma_tx_descriptor *desc, *tail_desc;
        u32 reg;
        struct xilinx_vdma_tx_segment *tail_segment;

        /* This function was invoked with lock held */
        if (chan->err)
                return;

        if (list_empty(&chan->pending_list))
                return;

        desc = list_first_entry(&chan->pending_list,
                                struct xilinx_dma_tx_descriptor, node);
        tail_desc = list_last_entry(&chan->pending_list,
                                    struct xilinx_dma_tx_descriptor, node);

        tail_segment = list_last_entry(&tail_desc->segments,
                                       struct xilinx_vdma_tx_segment, node);

        /* If it is SG mode and hardware is busy, cannot submit */
        if (chan->has_sg && xilinx_dma_is_running(chan) &&
            !xilinx_dma_is_idle(chan)) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return;
        }

        /*
         * If hardware is idle, then all descriptors on the running lists are
         * done, start new transfers
         */
        if (chan->has_sg)
                dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
                               desc->async_tx.phys);

        /* Configure the hardware using info in the config structure */
        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

        if (config->frm_cnt_en)
                reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
        else
                reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

        /* Configure channel to allow number frame buffers */
        dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
                       chan->desc_pendingcount);

        /*
         * With SG, start with circular mode, so that BDs can be fetched.
         * In direct register mode, if not parking, enable circular mode
         */
        if (chan->has_sg || !config->park)
                reg |= XILINX_DMA_DMACR_CIRC_EN;

        if (config->park)
                reg &= ~XILINX_DMA_DMACR_CIRC_EN;

        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

        if (config->park && (config->park_frm >= 0) &&
            (config->park_frm < chan->num_frms)) {
                if (chan->direction == DMA_MEM_TO_DEV)
                        dma_write(chan, XILINX_DMA_REG_PARK_PTR,
                                  config->park_frm <<
                                  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
                else
                        dma_write(chan, XILINX_DMA_REG_PARK_PTR,
                                  config->park_frm <<
                                  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
        }

        /* Start the hardware */
        xilinx_dma_start(chan);

        if (chan->err)
                return;

        /* Start the transfer */
        if (chan->has_sg) {
                dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
                               tail_segment->phys);
        } else {
                struct xilinx_vdma_tx_segment *segment, *last = NULL;
                int i = 0;

                if (chan->desc_submitcount < chan->num_frms)
                        i = chan->desc_submitcount;

                list_for_each_entry(segment, &desc->segments, node) {
                        if (chan->ext_addr)
                                vdma_desc_write_64(chan,
                                        XILINX_VDMA_REG_START_ADDRESS_64(i++),
                                        segment->hw.buf_addr,
                                        segment->hw.buf_addr_msb);
                        else
                                vdma_desc_write(chan,
                                        XILINX_VDMA_REG_START_ADDRESS(i++),
                                        segment->hw.buf_addr);

                        last = segment;
                }

                if (!last)
                        return;

                /* HW expects these parameters to be same for one transaction */
                vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
                vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
                                last->hw.stride);
                vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
        }

        if (!chan->has_sg) {
                list_del(&desc->node);
                list_add_tail(&desc->node, &chan->active_list);
                chan->desc_submitcount++;
                chan->desc_pendingcount--;
                if (chan->desc_submitcount == chan->num_frms)
                        chan->desc_submitcount = 0;
        } else {
                list_splice_tail_init(&chan->pending_list, &chan->active_list);
                chan->desc_pendingcount = 0;
        }
}
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
        struct xilinx_cdma_tx_segment *tail_segment;
        u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

        if (chan->err)
                return;

        if (list_empty(&chan->pending_list))
                return;

        head_desc = list_first_entry(&chan->pending_list,
                                     struct xilinx_dma_tx_descriptor, node);
        tail_desc = list_last_entry(&chan->pending_list,
                                    struct xilinx_dma_tx_descriptor, node);
        tail_segment = list_last_entry(&tail_desc->segments,
                                       struct xilinx_cdma_tx_segment, node);

        if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
                ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
                ctrl_reg |= chan->desc_pendingcount <<
                                XILINX_DMA_CR_COALESCE_SHIFT;
                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
        }

        if (chan->has_sg) {
                xilinx_write(chan, XILINX_DMA_REG_CURDESC,
                             head_desc->async_tx.phys);

                /* Update tail ptr register which will start the transfer */
                xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
                             tail_segment->phys);
        } else {
                /* In simple mode */
                struct xilinx_cdma_tx_segment *segment;
                struct xilinx_cdma_desc_hw *hw;

                segment = list_first_entry(&head_desc->segments,
                                           struct xilinx_cdma_tx_segment,
                                           node);

                hw = &segment->hw;

                xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
                xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
        }

        list_splice_tail_init(&chan->pending_list, &chan->active_list);
        chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
        struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
        u32 reg;

        if (chan->err)
                return;

        if (list_empty(&chan->pending_list))
                return;

        /* If it is SG mode and hardware is busy, cannot submit */
        if (chan->has_sg && xilinx_dma_is_running(chan) &&
            !xilinx_dma_is_idle(chan)) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return;
        }

        head_desc = list_first_entry(&chan->pending_list,
                                     struct xilinx_dma_tx_descriptor, node);
        tail_desc = list_last_entry(&chan->pending_list,
                                    struct xilinx_dma_tx_descriptor, node);
        tail_segment = list_last_entry(&tail_desc->segments,
                                       struct xilinx_axidma_tx_segment, node);

        if (chan->has_sg && !chan->xdev->mcdma) {
                old_head = list_first_entry(&head_desc->segments,
                                            struct xilinx_axidma_tx_segment, node);
                new_head = chan->seg_v;
                /* Copy Buffer Descriptor fields. */
                new_head->hw = old_head->hw;

                /* Swap and save new reserve */
                list_replace_init(&old_head->node, &new_head->node);
                chan->seg_v = old_head;

                tail_segment->hw.next_desc = chan->seg_v->phys;
                head_desc->async_tx.phys = new_head->phys;
        }

        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

        if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
                reg &= ~XILINX_DMA_CR_COALESCE_MAX;
                reg |= chan->desc_pendingcount <<
                                XILINX_DMA_CR_COALESCE_SHIFT;
                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
        }

        if (chan->has_sg && !chan->xdev->mcdma)
                xilinx_write(chan, XILINX_DMA_REG_CURDESC,
                             head_desc->async_tx.phys);

        if (chan->has_sg && chan->xdev->mcdma) {
                if (chan->direction == DMA_MEM_TO_DEV) {
                        dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
                                       head_desc->async_tx.phys);
                } else {
                        if (!chan->tdest) {
                                dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
                                               head_desc->async_tx.phys);
                        } else {
                                dma_ctrl_write(chan,
                                        XILINX_DMA_MCRX_CDESC(chan->tdest),
                                        head_desc->async_tx.phys);
                        }
                }
        }

        xilinx_dma_start(chan);

        if (chan->err)
                return;

        /* Start the transfer */
        if (chan->has_sg && !chan->xdev->mcdma) {
                if (chan->cyclic)
                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
                                     chan->cyclic_seg_v->phys);
                else
                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
                                     tail_segment->phys);
        } else if (chan->has_sg && chan->xdev->mcdma) {
                if (chan->direction == DMA_MEM_TO_DEV) {
                        dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
                                       tail_segment->phys);
                } else {
                        if (!chan->tdest) {
                                dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
                                               tail_segment->phys);
                        } else {
                                dma_ctrl_write(chan,
                                        XILINX_DMA_MCRX_TDESC(chan->tdest),
                                        tail_segment->phys);
                        }
                }
        } else {
                struct xilinx_axidma_tx_segment *segment;
                struct xilinx_axidma_desc_hw *hw;

                segment = list_first_entry(&head_desc->segments,
                                           struct xilinx_axidma_tx_segment,
                                           node);
                hw = &segment->hw;

                xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
        }

        list_splice_tail_init(&chan->pending_list, &chan->active_list);
        chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        chan->start_transfer(chan);
        spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
        struct xilinx_dma_tx_descriptor *desc, *next;

        /* This function was invoked with lock held */
        if (list_empty(&chan->active_list))
                return;

        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
                list_del(&desc->node);
                if (!desc->cyclic)
                        dma_cookie_complete(&desc->async_tx);
                list_add_tail(&desc->node, &chan->done_list);
        }
}
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
        int err;
        u32 tmp;

        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

        /* Wait for the hardware to finish reset */
        err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
                                      !(tmp & XILINX_DMA_DMACR_RESET), 0,
                                      XILINX_DMA_LOOP_COUNT);

        if (err) {
                dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
                        dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
                        dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
                return -ETIMEDOUT;
        }

        chan->err = false;

        return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
        int err;

        /* Reset VDMA */
        err = xilinx_dma_reset(chan);
        if (err)
                return err;

        /* Enable interrupts */
        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

        return 0;
}
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
        struct xilinx_dma_chan *chan = data;
        u32 status;

        /* Read the status and ack the interrupts. */
        status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
        if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
                return IRQ_NONE;

        dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
                       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

        if (status & XILINX_DMA_DMASR_ERR_IRQ) {
                /*
                 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
                 * error is recoverable, ignore it. Otherwise flag the error.
                 *
                 * Only recoverable errors can be cleared in the DMASR register;
                 * make sure not to set any of the other error bits to 1.
                 */
                u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

                dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
                               errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

                if (!chan->flush_on_fsync ||
                    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
                        dev_err(chan->dev,
                                "Channel %p has errors %x, cdr %x tdr %x\n",
                                chan, errors,
                                dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
                                dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
                        chan->err = true;
                }
        }

        if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
                /*
                 * Device takes too long to do the transfer when user requires
                 * responsiveness.
                 */
                dev_dbg(chan->dev, "Inter-packet latency too long\n");
        }

        if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
                spin_lock(&chan->lock);
                xilinx_dma_complete_descriptor(chan);
                chan->start_transfer(chan);
                spin_unlock(&chan->lock);
        }

        tasklet_schedule(&chan->tasklet);
        return IRQ_HANDLED;
}
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
                              struct xilinx_dma_tx_descriptor *desc)
{
        struct xilinx_vdma_tx_segment *tail_segment;
        struct xilinx_dma_tx_descriptor *tail_desc;
        struct xilinx_axidma_tx_segment *axidma_tail_segment;
        struct xilinx_cdma_tx_segment *cdma_tail_segment;

        if (list_empty(&chan->pending_list))
                goto append;

        /*
         * Add the hardware descriptor to the chain of hardware descriptors
         * that already exists in memory.
         */
        tail_desc = list_last_entry(&chan->pending_list,
                                    struct xilinx_dma_tx_descriptor, node);
        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
                tail_segment = list_last_entry(&tail_desc->segments,
                                               struct xilinx_vdma_tx_segment,
                                               node);
                tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                cdma_tail_segment = list_last_entry(&tail_desc->segments,
                                                struct xilinx_cdma_tx_segment,
                                                node);
                cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
        } else {
                axidma_tail_segment = list_last_entry(&tail_desc->segments,
                                               struct xilinx_axidma_tx_segment,
                                               node);
                axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
        }

        /*
         * Add the software descriptor and all children to the list
         * of pending transactions
         */
append:
        list_add_tail(&desc->node, &chan->pending_list);
        chan->desc_pendingcount++;

        if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
            && unlikely(chan->desc_pendingcount > chan->num_frms)) {
                dev_dbg(chan->dev, "desc pendingcount is too high\n");
                chan->desc_pendingcount = chan->num_frms;
        }
}
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
        struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;
        int err;

        if (chan->cyclic) {
                xilinx_dma_free_tx_descriptor(chan, desc);
                return -EBUSY;
        }

        if (chan->err) {
                /*
                 * If reset fails, need to hard reset the system.
                 * Channel is no longer functional
                 */
                err = xilinx_dma_chan_reset(chan);
                if (err < 0)
                        return err;
        }

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx);

        /* Put this transaction onto the tail of the pending queue */
        append_desc_queue(chan, desc);

        if (desc->cyclic)
                chan->cyclic = true;

        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
                                 struct dma_interleaved_template *xt,
                                 unsigned long flags)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_vdma_tx_segment *segment, *prev = NULL;
        struct xilinx_vdma_desc_hw *hw;

        if (!is_slave_direction(xt->dir))
                return NULL;

        if (!xt->numf || !xt->sgl[0].size)
                return NULL;

        if (xt->frame_size != 1)
                return NULL;

        /* Allocate a transaction descriptor. */
        desc = xilinx_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
        async_tx_ack(&desc->async_tx);

        /* Allocate the link descriptor from DMA pool */
        segment = xilinx_vdma_alloc_tx_segment(chan);
        if (!segment)
                goto error;

        /* Fill in the hardware descriptor */
        hw = &segment->hw;
        hw->vsize = xt->numf;
        hw->hsize = xt->sgl[0].size;
        hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
                        XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
        hw->stride |= chan->config.frm_dly <<
                        XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

        if (xt->dir != DMA_MEM_TO_DEV) {
                if (chan->ext_addr) {
                        hw->buf_addr = lower_32_bits(xt->dst_start);
                        hw->buf_addr_msb = upper_32_bits(xt->dst_start);
                } else {
                        hw->buf_addr = xt->dst_start;
                }
        } else {
                if (chan->ext_addr) {
                        hw->buf_addr = lower_32_bits(xt->src_start);
                        hw->buf_addr_msb = upper_32_bits(xt->src_start);
                } else {
                        hw->buf_addr = xt->src_start;
                }
        }

        /* Insert the segment into the descriptor segments list. */
        list_add_tail(&segment->node, &desc->segments);

        prev = segment;

        /* Link the last hardware descriptor with the first. */
        segment = list_first_entry(&desc->segments,
                                   struct xilinx_vdma_tx_segment, node);
        desc->async_tx.phys = segment->phys;

        return &desc->async_tx;

error:
        xilinx_dma_free_tx_descriptor(chan, desc);
        return NULL;
}
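
/*
 * Usage sketch (not part of the driver): a client reaches the routine above
 * through the generic dmaengine_prep_interleaved_dma() call.  The template
 * describes one frame of numf lines, each sgl[0].size bytes wide with
 * sgl[0].icg bytes of padding to the next line.  The channel pointer and the
 * width/height/stride values below are hypothetical placeholders.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_dma_addr;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width * bytes_per_pixel;
 *	xt->sgl[0].icg = stride - xt->sgl[0].size;
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *	kfree(xt);
 */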
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
                        dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_cdma_tx_segment *segment, *prev;
        struct xilinx_cdma_desc_hw *hw;

        if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
                return NULL;

        desc = xilinx_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;

        /* Allocate the link descriptor from DMA pool */
        segment = xilinx_cdma_alloc_tx_segment(chan);
        if (!segment)
                goto error;

        hw = &segment->hw;
        hw->control = len;
        hw->src_addr = dma_src;
        hw->dest_addr = dma_dst;
        if (chan->ext_addr) {
                hw->src_addr_msb = upper_32_bits(dma_src);
                hw->dest_addr_msb = upper_32_bits(dma_dst);
        }

        /* Fill the previous next descriptor with current */
        prev = list_last_entry(&desc->segments,
                               struct xilinx_cdma_tx_segment, node);
        prev->hw.next_desc = segment->phys;

        /* Insert the segment into the descriptor segments list. */
        list_add_tail(&segment->node, &desc->segments);

        prev = segment;

        /* Link the last hardware descriptor with the first. */
        segment = list_first_entry(&desc->segments,
                                   struct xilinx_cdma_tx_segment, node);
        desc->async_tx.phys = segment->phys;
        prev->hw.next_desc = segment->phys;

        return &desc->async_tx;

error:
        xilinx_dma_free_tx_descriptor(chan, desc);
        return NULL;
}
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
        u32 *app_w = (u32 *)context;
        struct scatterlist *sg;
        size_t copy;
        size_t sg_used;
        unsigned int i;

        if (!is_slave_direction(direction))
                return NULL;

        /* Allocate a transaction descriptor. */
        desc = xilinx_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;

        /* Build transactions using information in the scatter gather list */
        for_each_sg(sgl, sg, sg_len, i) {
                sg_used = 0;

                /* Loop until the entire scatterlist entry is used */
                while (sg_used < sg_dma_len(sg)) {
                        struct xilinx_axidma_desc_hw *hw;

                        /* Get a free segment */
                        segment = xilinx_axidma_alloc_tx_segment(chan);
                        if (!segment)
                                goto error;

                        /*
                         * Calculate the maximum number of bytes to transfer,
                         * making sure it is less than the hw limit
                         */
                        copy = min_t(size_t, sg_dma_len(sg) - sg_used,
                                     XILINX_DMA_MAX_TRANS_LEN);
                        hw = &segment->hw;

                        /* Fill in the descriptor */
                        xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
                                          sg_used, 0);

                        hw->control = copy;

                        if (chan->direction == DMA_MEM_TO_DEV) {
                                if (app_w)
                                        memcpy(hw->app, app_w, sizeof(u32) *
                                               XILINX_DMA_NUM_APP_WORDS);
                        }

                        if (prev)
                                prev->hw.next_desc = segment->phys;

                        prev = segment;
                        sg_used += copy;

                        /*
                         * Insert the segment into the descriptor segments
                         * list.
                         */
                        list_add_tail(&segment->node, &desc->segments);
                }
        }

        segment = list_first_entry(&desc->segments,
                                   struct xilinx_axidma_tx_segment, node);
        desc->async_tx.phys = segment->phys;
        prev->hw.next_desc = segment->phys;

        /* For the last DMA_MEM_TO_DEV transfer, set EOP */
        if (chan->direction == DMA_MEM_TO_DEV) {
                segment->hw.control |= XILINX_DMA_BD_SOP;
                segment = list_last_entry(&desc->segments,
                                          struct xilinx_axidma_tx_segment,
                                          node);
                segment->hw.control |= XILINX_DMA_BD_EOP;
        }

        return &desc->async_tx;

error:
        xilinx_dma_free_tx_descriptor(chan, desc);
        return NULL;
}
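
/*
 * Usage sketch (not part of the driver): a DMA client typically maps a
 * scatterlist and hands it to the routine above through the generic
 * dmaengine API.  The device, channel, scatterlist and callback names below
 * are hypothetical placeholders.
 *
 *	int nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		tx->callback = my_xfer_done;
 *		tx->callback_param = my_ctx;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */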
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
        struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
        size_t copy, sg_used;
        unsigned int num_periods;
        int i;
        u32 reg;

        if (!period_len)
                return NULL;

        num_periods = buf_len / period_len;

        if (!num_periods)
                return NULL;

        if (!is_slave_direction(direction))
                return NULL;

        /* Allocate a transaction descriptor. */
        desc = xilinx_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        chan->direction = direction;
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;

        for (i = 0; i < num_periods; ++i) {
                sg_used = 0;

                while (sg_used < period_len) {
                        struct xilinx_axidma_desc_hw *hw;

                        /* Get a free segment */
                        segment = xilinx_axidma_alloc_tx_segment(chan);
                        if (!segment)
                                goto error;

                        /*
                         * Calculate the maximum number of bytes to transfer,
                         * making sure it is less than the hw limit
                         */
                        copy = min_t(size_t, period_len - sg_used,
                                     XILINX_DMA_MAX_TRANS_LEN);
                        hw = &segment->hw;
                        xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
                                          period_len * i);
                        hw->control = copy;

                        if (prev)
                                prev->hw.next_desc = segment->phys;

                        prev = segment;
                        sg_used += copy;

                        /*
                         * Insert the segment into the descriptor segments
                         * list.
                         */
                        list_add_tail(&segment->node, &desc->segments);
                }
        }

        head_segment = list_first_entry(&desc->segments,
                                        struct xilinx_axidma_tx_segment, node);
        desc->async_tx.phys = head_segment->phys;

        desc->cyclic = true;
        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
        reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

        segment = list_last_entry(&desc->segments,
                                  struct xilinx_axidma_tx_segment,
                                  node);
        segment->hw.next_desc = (u32) head_segment->phys;

        /* For the last DMA_MEM_TO_DEV transfer, set EOP */
        if (direction == DMA_MEM_TO_DEV) {
                head_segment->hw.control |= XILINX_DMA_BD_SOP;
                segment->hw.control |= XILINX_DMA_BD_EOP;
        }

        return &desc->async_tx;

error:
        xilinx_dma_free_tx_descriptor(chan, desc);
        return NULL;
}
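
/*
 * Usage sketch (not part of the driver): cyclic mode is reached through
 * dmaengine_prep_dma_cyclic(), for example to keep refilling a capture or
 * audio ring buffer.  buf_len should be a whole multiple of period_len; the
 * coherent buffer and callback below are hypothetical placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = period_elapsed;
 *		tx->callback_param = my_ctx;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */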
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
                            struct dma_interleaved_template *xt,
                            unsigned long flags)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
        struct xilinx_axidma_tx_segment *segment;
        struct xilinx_axidma_desc_hw *hw;

        if (!is_slave_direction(xt->dir))
                return NULL;

        if (!xt->numf || !xt->sgl[0].size)
                return NULL;

        if (xt->frame_size != 1)
                return NULL;

        /* Allocate a transaction descriptor. */
        desc = xilinx_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        chan->direction = xt->dir;
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;

        /* Get a free segment */
        segment = xilinx_axidma_alloc_tx_segment(chan);
        if (!segment)
                goto error;

        hw = &segment->hw;

        /* Fill in the descriptor */
        if (xt->dir != DMA_MEM_TO_DEV)
                hw->buf_addr = xt->dst_start;
        else
                hw->buf_addr = xt->src_start;

        hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
        hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
                            XILINX_DMA_BD_VSIZE_MASK;
        hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
                            XILINX_DMA_BD_STRIDE_MASK;
        hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

        /*
         * Insert the segment into the descriptor segments
         * list.
         */
        list_add_tail(&segment->node, &desc->segments);

        segment = list_first_entry(&desc->segments,
                                   struct xilinx_axidma_tx_segment, node);
        desc->async_tx.phys = segment->phys;

        /* For the last DMA_MEM_TO_DEV transfer, set EOP */
        if (xt->dir == DMA_MEM_TO_DEV) {
                segment->hw.control |= XILINX_DMA_BD_SOP;
                segment = list_last_entry(&desc->segments,
                                          struct xilinx_axidma_tx_segment,
                                          node);
                segment->hw.control |= XILINX_DMA_BD_EOP;
        }

        return &desc->async_tx;

error:
        xilinx_dma_free_tx_descriptor(chan, desc);
        return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        u32 reg;

        if (chan->cyclic)
                xilinx_dma_chan_reset(chan);

        /* Halt the DMA engine */
        xilinx_dma_halt(chan);

        /* Remove and free all of the descriptors in the lists */
        xilinx_dma_free_descriptors(chan);

        if (chan->cyclic) {
                reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
                reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
                chan->cyclic = false;
        }

        return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
                                   struct xilinx_vdma_config *cfg)
{
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        u32 dmacr;

        if (cfg->reset)
                return xilinx_dma_chan_reset(chan);

        dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

        chan->config.frm_dly = cfg->frm_dly;
        chan->config.park = cfg->park;

        /* genlock settings */
        chan->config.gen_lock = cfg->gen_lock;
        chan->config.master = cfg->master;

        if (cfg->gen_lock && chan->genlock) {
                dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
                dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
        }

        chan->config.frm_cnt_en = cfg->frm_cnt_en;
        if (cfg->park)
                chan->config.park_frm = cfg->park_frm;
        else
                chan->config.park_frm = -1;

        chan->config.coalesc = cfg->coalesc;
        chan->config.delay = cfg->delay;

        if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
                dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
                chan->config.coalesc = cfg->coalesc;
        }

        if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
                dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
                chan->config.delay = cfg->delay;
        }

        /* FSync Source selection */
        dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
        dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

        return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
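
/*
 * Usage sketch (not part of the driver): a video client can tune a VDMA
 * channel at run time through the exported helper above; struct
 * xilinx_vdma_config comes from include/linux/dma/xilinx_dma.h.  The field
 * values below are hypothetical.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *		.park = 0,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */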
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
        /* Disable all interrupts */
        dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
                     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

        if (chan->irq > 0)
                free_irq(chan->irq, chan);

        tasklet_kill(&chan->tasklet);

        list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
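/*
 * Note on the clock handling above: only s_axi_lite_aclk is treated as
 * mandatory. The remaining clocks are looked up optimistically and reset to
 * NULL when devm_clk_get() fails, which is safe because the common clock
 * framework treats a NULL clock as a no-op in clk_prepare_enable() and
 * clk_disable_unprepare().
 */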
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		chan->start_transfer = xilinx_dma_start_transfer;
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		chan->start_transfer = xilinx_cdma_start_transfer;
	else
		chan->start_transfer = xilinx_vdma_start_transfer;

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
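/*
 * Usage sketch: of_dma_xilinx_xlate() resolves the single cell of a
 * consumer's "dmas" specifier into a channel index. A hypothetical consumer
 * node and lookup could look like this (node and request names are
 * illustrative):
 *
 *	display@0 {
 *		dmas = <&axi_vdma 0>;
 *		dma-names = "video";
 *	};
 *
 *	chan = dma_request_chan(dev, "video");
 */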
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
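/*
 * Device-tree sketch (illustrative only; addresses, interrupt cells and
 * property values are assumptions): a VDMA instance matched by the table
 * above is described with one child node per channel, using the properties
 * parsed in xilinx_dma_probe() and xilinx_dma_chan_probe():
 *
 *	axi_vdma: dma@43000000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		reg = <0x43000000 0x1000>;
 *		xlnx,num-fstores = <3>;
 *		xlnx,flush-fsync = <1>;
 *		xlnx,addrwidth = <32>;
 *
 *		dma-channel@43000000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 59 4>;
 *			xlnx,datawidth = <64>;
 *		};
 *	};
 *
 * Clock, interrupt-parent and other required properties are omitted for
 * brevity.
 */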
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
					xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");