/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET	0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET	0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET	0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET	0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR	0x0000
#define XILINX_DMA_DMACR_DELAY_MAX	0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT	24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ	BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ	BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ	BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT	8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
#define XILINX_DMA_DMACR_FRAMECNT_EN	BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN	BIT(3)
#define XILINX_DMA_DMACR_RESET	BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN	BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP	BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK	GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR	0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR	BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ	BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ	BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ	BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR	BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR	BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR	BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR	BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR	BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
#define XILINX_DMA_DMASR_IDLE	BIT(1)
#define XILINX_DMA_DMASR_HALTED	BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC	0x0008
#define XILINX_DMA_REG_TAILDESC	0x0010
#define XILINX_DMA_REG_REG_INDEX	0x0014
#define XILINX_DMA_REG_FRMSTORE	0x0018
#define XILINX_DMA_REG_THRESHOLD	0x001c
#define XILINX_DMA_REG_FRMPTR_STS	0x0024
#define XILINX_DMA_REG_PARK_PTR	0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK	GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK	GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION	0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE	0x0000
#define XILINX_DMA_REG_HSIZE	0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE	0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM	3
#define XILINX_DMA_FLUSH_MM2S	2
#define XILINX_DMA_FLUSH_BOTH	1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT	1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT	0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP	BIT(27)
#define XILINX_DMA_BD_EOP	BIT(26)
#define XILINX_DMA_COALESCE_MAX	255
#define XILINX_DMA_NUM_DESCS	255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR	0x18
#define XILINX_CDMA_REG_DSTADDR	0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE	BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};

#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
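
/*
 * Usage sketch for xilinx_dma_poll_timeout() (mirrors the halt wait used
 * further below in this file):
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *					  val & XILINX_DMA_DMASR_HALTED, 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */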

static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits*/
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}
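
/**
 * xilinx_axidma_buf - Program a buffer address into an AXI DMA hw descriptor
 * @chan: Driver specific DMA channel
 * @hw: AXI DMA hardware descriptor to fill in
 * @buf_addr: Physical base address of the buffer
 * @sg_used: Bytes of the current scatterlist entry already consumed
 * @period_len: Offset of the current cyclic period (0 for slave_sg)
 *
 * Splits the address into LSB/MSB words when the channel supports 64-bit
 * addressing.
 */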
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}
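
/**
 * xilinx_dma_clean_hw_desc - Clean hardware descriptor
 * @hw: HW descriptor to clean
 *
 * Zeroes the descriptor while preserving the next descriptor pointers so
 * the BD ring stays linked.
 */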
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free Memory that is allocated for cyclic DMA Mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_zalloc_coherent(chan->dev,
						  sizeof(*chan->seg_v) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
					sizeof(*chan->cyclic_seg_v),
					&chan->cyclic_seg_p, GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA resetting once channel will reset the
		 * other channel as well so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
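
		/*
		 * Per BD, hw->control holds the programmed length and
		 * hw->status the bytes the hardware has completed, so the
		 * difference is this segment's remaining byte count.
		 */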
		list_for_each_entry(segment, &desc->segments, node) {
			hw = &segment->hw;
			residue += (hw->control - hw->status) &
				   XILINX_DMA_MAX_TRANS_LEN;
		}

		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
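
	/* Program the park pointer frame reference for this channel's direction */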
	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	}
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);
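
	/*
	 * Program interrupt coalescing so the completion interrupt fires
	 * only after the currently pending descriptors have completed.
	 */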
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
			XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
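
	/* Close the BD ring: point the last segment back at the first one */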
	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			    XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
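
/*
 * Illustrative sketch (not part of this driver): run-time configuration of a
 * VDMA channel by a client using the exported helper above.  The channel
 * "chan" and the chosen values are assumptions for the example only; the
 * struct xilinx_vdma_config fields are the ones handled by the code above.
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.frm_cnt_en = 1;	// enable frame-count interrupts
 *	cfg.coalesc = 4;	// interrupt after 4 frames
 *	cfg.delay = 0;		// no inter-packet delay interrupt
 *	cfg.park = 0;		// circular mode, no parking
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */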

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while
	 * the DMA engine is in progress. It avoids polling a bit in the
	 * status register to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
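
/*
 * Illustrative sketch (not part of this driver): the translation function
 * above maps the single cell of a consumer's "dmas" specifier to a channel
 * index, so a client typically obtains a channel as below.  The device
 * pointer "dev" and the request name "vdma0" are assumptions tied to the
 * consumer's device-tree "dma-names" property.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "vdma0");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */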

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");