/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two-dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one-dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
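/*
 * Typical client usage (illustrative sketch only, not part of this driver):
 * a video capture or display driver obtains a channel through the generic
 * dmaengine API and, for VDMA, may tune it with xilinx_vdma_channel_set_config()
 * before queueing interleaved transfers. Error handling is omitted and the
 * "vdma0" channel name is an assumption taken from a hypothetical device tree
 * binding.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "vdma0");
 *	struct xilinx_vdma_config cfg = { .park = 0, .coalesc = 1 };
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */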
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000
/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
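/*
 * Note (added for clarity, not in the original source): the macro above relies
 * on token pasting, so a call such as xilinx_prep_dma_addr_t(hw->src_addr)
 * expands to ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))),
 * i.e. it assumes a matching <field>_msb member sits alongside <field> in the
 * hardware descriptor.
 */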
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};
/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
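/*
 * Illustrative sketch (not part of the original driver): the poll helper above
 * is how the driver waits on status bits, for instance the HALTED bit after
 * clearing RUNSTOP. xilinx_dma_wait_halted() is a hypothetical name used only
 * for this example.
 */
static inline int xilinx_dma_wait_halted(struct xilinx_dma_chan *chan)
{
	u32 val;

	/* Spin (with zero delay between reads) until DMASR.Halted is set */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}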
/* IO accessors */

static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits*/
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}
static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
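/*
 * Example (added for illustration, values assumed): with ext_addr set and a
 * buffer at DMA address 0x1_2345_6000, sg_used = 0 and period_len = 0, the
 * helper above programs hw->buf_addr = 0x23456000 and hw->buf_addr_msb = 0x1;
 * without ext_addr only the low 32 bits are written.
 */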
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */
555 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
556 * @chan: Driver specific DMA channel
558 * Return: The allocated segment on success and NULL on failure.
560 static struct xilinx_vdma_tx_segment
*
561 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan
*chan
)
563 struct xilinx_vdma_tx_segment
*segment
;
566 segment
= dma_pool_zalloc(chan
->desc_pool
, GFP_ATOMIC
, &phys
);
570 segment
->phys
= phys
;
576 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
577 * @chan: Driver specific DMA channel
579 * Return: The allocated segment on success and NULL on failure.
581 static struct xilinx_cdma_tx_segment
*
582 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan
*chan
)
584 struct xilinx_cdma_tx_segment
*segment
;
587 segment
= dma_pool_zalloc(chan
->desc_pool
, GFP_ATOMIC
, &phys
);
591 segment
->phys
= phys
;
597 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
598 * @chan: Driver specific DMA channel
600 * Return: The allocated segment on success and NULL on failure.
602 static struct xilinx_axidma_tx_segment
*
603 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan
*chan
)
605 struct xilinx_axidma_tx_segment
*segment
= NULL
;
608 spin_lock_irqsave(&chan
->lock
, flags
);
609 if (!list_empty(&chan
->free_seg_list
)) {
610 segment
= list_first_entry(&chan
->free_seg_list
,
611 struct xilinx_axidma_tx_segment
,
613 list_del(&segment
->node
);
615 spin_unlock_irqrestore(&chan
->lock
, flags
);
620 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw
*hw
)
622 u32 next_desc
= hw
->next_desc
;
623 u32 next_desc_msb
= hw
->next_desc_msb
;
625 memset(hw
, 0, sizeof(struct xilinx_axidma_desc_hw
));
627 hw
->next_desc
= next_desc
;
628 hw
->next_desc_msb
= next_desc_msb
;
632 * xilinx_dma_free_tx_segment - Free transaction segment
633 * @chan: Driver specific DMA channel
634 * @segment: DMA transaction segment
636 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan
*chan
,
637 struct xilinx_axidma_tx_segment
*segment
)
639 xilinx_dma_clean_hw_desc(&segment
->hw
);
641 list_add_tail(&segment
->node
, &chan
->free_seg_list
);
645 * xilinx_cdma_free_tx_segment - Free transaction segment
646 * @chan: Driver specific DMA channel
647 * @segment: DMA transaction segment
649 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan
*chan
,
650 struct xilinx_cdma_tx_segment
*segment
)
652 dma_pool_free(chan
->desc_pool
, segment
, segment
->phys
);
656 * xilinx_vdma_free_tx_segment - Free transaction segment
657 * @chan: Driver specific DMA channel
658 * @segment: DMA transaction segment
660 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan
*chan
,
661 struct xilinx_vdma_tx_segment
*segment
)
663 dma_pool_free(chan
->desc_pool
, segment
, segment
->phys
);
667 * xilinx_dma_tx_descriptor - Allocate transaction descriptor
668 * @chan: Driver specific DMA channel
670 * Return: The allocated descriptor on success and NULL on failure.
672 static struct xilinx_dma_tx_descriptor
*
673 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan
*chan
)
675 struct xilinx_dma_tx_descriptor
*desc
;
677 desc
= kzalloc(sizeof(*desc
), GFP_KERNEL
);
681 INIT_LIST_HEAD(&desc
->segments
);
687 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
688 * @chan: Driver specific DMA channel
689 * @desc: DMA transaction descriptor
692 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan
*chan
,
693 struct xilinx_dma_tx_descriptor
*desc
)
695 struct xilinx_vdma_tx_segment
*segment
, *next
;
696 struct xilinx_cdma_tx_segment
*cdma_segment
, *cdma_next
;
697 struct xilinx_axidma_tx_segment
*axidma_segment
, *axidma_next
;
702 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
) {
703 list_for_each_entry_safe(segment
, next
, &desc
->segments
, node
) {
704 list_del(&segment
->node
);
705 xilinx_vdma_free_tx_segment(chan
, segment
);
707 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
708 list_for_each_entry_safe(cdma_segment
, cdma_next
,
709 &desc
->segments
, node
) {
710 list_del(&cdma_segment
->node
);
711 xilinx_cdma_free_tx_segment(chan
, cdma_segment
);
714 list_for_each_entry_safe(axidma_segment
, axidma_next
,
715 &desc
->segments
, node
) {
716 list_del(&axidma_segment
->node
);
717 xilinx_dma_free_tx_segment(chan
, axidma_segment
);
724 /* Required functions */
727 * xilinx_dma_free_desc_list - Free descriptors list
728 * @chan: Driver specific DMA channel
729 * @list: List to parse and delete the descriptor
731 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan
*chan
,
732 struct list_head
*list
)
734 struct xilinx_dma_tx_descriptor
*desc
, *next
;
736 list_for_each_entry_safe(desc
, next
, list
, node
) {
737 list_del(&desc
->node
);
738 xilinx_dma_free_tx_descriptor(chan
, desc
);
743 * xilinx_dma_free_descriptors - Free channel descriptors
744 * @chan: Driver specific DMA channel
746 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan
*chan
)
750 spin_lock_irqsave(&chan
->lock
, flags
);
752 xilinx_dma_free_desc_list(chan
, &chan
->pending_list
);
753 xilinx_dma_free_desc_list(chan
, &chan
->done_list
);
754 xilinx_dma_free_desc_list(chan
, &chan
->active_list
);
756 spin_unlock_irqrestore(&chan
->lock
, flags
);
760 * xilinx_dma_free_chan_resources - Free channel resources
761 * @dchan: DMA channel
763 static void xilinx_dma_free_chan_resources(struct dma_chan
*dchan
)
765 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
768 dev_dbg(chan
->dev
, "Free all channel resources.\n");
770 xilinx_dma_free_descriptors(chan
);
772 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
773 spin_lock_irqsave(&chan
->lock
, flags
);
774 INIT_LIST_HEAD(&chan
->free_seg_list
);
775 spin_unlock_irqrestore(&chan
->lock
, flags
);
777 /* Free memory that is allocated for BD */
778 dma_free_coherent(chan
->dev
, sizeof(*chan
->seg_v
) *
779 XILINX_DMA_NUM_DESCS
, chan
->seg_v
,
782 /* Free Memory that is allocated for cyclic DMA Mode */
783 dma_free_coherent(chan
->dev
, sizeof(*chan
->cyclic_seg_v
),
784 chan
->cyclic_seg_v
, chan
->cyclic_seg_p
);
787 if (chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIDMA
) {
788 dma_pool_destroy(chan
->desc_pool
);
789 chan
->desc_pool
= NULL
;
794 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
795 * @chan: Driver specific dma channel
796 * @desc: dma transaction descriptor
797 * @flags: flags for spin lock
799 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan
*chan
,
800 struct xilinx_dma_tx_descriptor
*desc
,
801 unsigned long *flags
)
803 dma_async_tx_callback callback
;
804 void *callback_param
;
806 callback
= desc
->async_tx
.callback
;
807 callback_param
= desc
->async_tx
.callback_param
;
809 spin_unlock_irqrestore(&chan
->lock
, *flags
);
810 callback(callback_param
);
811 spin_lock_irqsave(&chan
->lock
, *flags
);
816 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
817 * @chan: Driver specific DMA channel
819 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan
*chan
)
821 struct xilinx_dma_tx_descriptor
*desc
, *next
;
824 spin_lock_irqsave(&chan
->lock
, flags
);
826 list_for_each_entry_safe(desc
, next
, &chan
->done_list
, node
) {
827 struct dmaengine_desc_callback cb
;
830 xilinx_dma_chan_handle_cyclic(chan
, desc
, &flags
);
834 /* Remove from the list of running transactions */
835 list_del(&desc
->node
);
837 /* Run the link descriptor callback function */
838 dmaengine_desc_get_callback(&desc
->async_tx
, &cb
);
839 if (dmaengine_desc_callback_valid(&cb
)) {
840 spin_unlock_irqrestore(&chan
->lock
, flags
);
841 dmaengine_desc_callback_invoke(&cb
, NULL
);
842 spin_lock_irqsave(&chan
->lock
, flags
);
845 /* Run any dependencies, then free the descriptor */
846 dma_run_dependencies(&desc
->async_tx
);
847 xilinx_dma_free_tx_descriptor(chan
, desc
);
850 spin_unlock_irqrestore(&chan
->lock
, flags
);
854 * xilinx_dma_do_tasklet - Schedule completion tasklet
855 * @data: Pointer to the Xilinx DMA channel structure
857 static void xilinx_dma_do_tasklet(unsigned long data
)
859 struct xilinx_dma_chan
*chan
= (struct xilinx_dma_chan
*)data
;
861 xilinx_dma_chan_desc_cleanup(chan
);
865 * xilinx_dma_alloc_chan_resources - Allocate channel resources
866 * @dchan: DMA channel
868 * Return: '0' on success and failure value on error
870 static int xilinx_dma_alloc_chan_resources(struct dma_chan
*dchan
)
872 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
875 /* Has this channel already been allocated? */
	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
883 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
884 /* Allocate the buffer descriptors. */
885 chan
->seg_v
= dma_alloc_coherent(chan
->dev
,
886 sizeof(*chan
->seg_v
) * XILINX_DMA_NUM_DESCS
,
887 &chan
->seg_p
, GFP_KERNEL
);
890 "unable to allocate channel %d descriptors\n",
895 * For cyclic DMA mode we need to program the tail Descriptor
896 * register with a value which is not a part of the BD chain
897 * so allocating a desc segment during channel allocation for
898 * programming tail descriptor.
900 chan
->cyclic_seg_v
= dma_alloc_coherent(chan
->dev
,
901 sizeof(*chan
->cyclic_seg_v
),
904 if (!chan
->cyclic_seg_v
) {
906 "unable to allocate desc segment for cyclic DMA\n");
907 dma_free_coherent(chan
->dev
, sizeof(*chan
->seg_v
) *
908 XILINX_DMA_NUM_DESCS
, chan
->seg_v
,
912 chan
->cyclic_seg_v
->phys
= chan
->cyclic_seg_p
;
914 for (i
= 0; i
< XILINX_DMA_NUM_DESCS
; i
++) {
915 chan
->seg_v
[i
].hw
.next_desc
=
916 lower_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
917 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
918 chan
->seg_v
[i
].hw
.next_desc_msb
=
919 upper_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
920 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
921 chan
->seg_v
[i
].phys
= chan
->seg_p
+
922 sizeof(*chan
->seg_v
) * i
;
923 list_add_tail(&chan
->seg_v
[i
].node
,
924 &chan
->free_seg_list
);
926 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
927 chan
->desc_pool
= dma_pool_create("xilinx_cdma_desc_pool",
929 sizeof(struct xilinx_cdma_tx_segment
),
930 __alignof__(struct xilinx_cdma_tx_segment
),
933 chan
->desc_pool
= dma_pool_create("xilinx_vdma_desc_pool",
935 sizeof(struct xilinx_vdma_tx_segment
),
936 __alignof__(struct xilinx_vdma_tx_segment
),
940 if (!chan
->desc_pool
&&
941 (chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIDMA
)) {
943 "unable to allocate channel %d descriptor pool\n",
948 dma_cookie_init(dchan
);
950 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
		/* For AXI DMA, resetting one channel resets the other
		 * channel as well, so enable the interrupts here.
		 */
954 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
955 XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
958 if ((chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) && chan
->has_sg
)
959 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
960 XILINX_CDMA_CR_SGMODE
);
/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
994 * xilinx_dma_tx_status - Get DMA transaction status
995 * @dchan: DMA channel
996 * @cookie: Transaction identifier
997 * @txstate: Transaction state
999 * Return: DMA transaction status
1001 static enum dma_status
xilinx_dma_tx_status(struct dma_chan
*dchan
,
1002 dma_cookie_t cookie
,
1003 struct dma_tx_state
*txstate
)
1005 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1006 struct xilinx_dma_tx_descriptor
*desc
;
1007 struct xilinx_axidma_tx_segment
*segment
;
1008 struct xilinx_axidma_desc_hw
*hw
;
1009 enum dma_status ret
;
1010 unsigned long flags
;
1013 ret
= dma_cookie_status(dchan
, cookie
, txstate
);
1014 if (ret
== DMA_COMPLETE
|| !txstate
)
1017 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
1018 spin_lock_irqsave(&chan
->lock
, flags
);
1020 desc
= list_last_entry(&chan
->active_list
,
1021 struct xilinx_dma_tx_descriptor
, node
);
1023 list_for_each_entry(segment
, &desc
->segments
, node
) {
1025 residue
+= (hw
->control
- hw
->status
) &
1026 chan
->xdev
->max_buffer_len
;
1029 spin_unlock_irqrestore(&chan
->lock
, flags
);
1031 chan
->residue
= residue
;
1032 dma_set_residue(txstate
, chan
->residue
);
1039 * xilinx_dma_stop_transfer - Halt DMA channel
1040 * @chan: Driver specific DMA channel
1042 * Return: '0' on success and failure value on error
1044 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan
*chan
)
1048 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RUNSTOP
);
1050 /* Wait for the hardware to halt */
1051 return xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1052 val
& XILINX_DMA_DMASR_HALTED
, 0,
1053 XILINX_DMA_LOOP_COUNT
);
1057 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1058 * @chan: Driver specific DMA channel
1060 * Return: '0' on success and failure value on error
1062 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan
*chan
)
1066 return xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1067 val
& XILINX_DMA_DMASR_IDLE
, 0,
1068 XILINX_DMA_LOOP_COUNT
);
1072 * xilinx_dma_start - Start DMA channel
1073 * @chan: Driver specific DMA channel
1075 static void xilinx_dma_start(struct xilinx_dma_chan
*chan
)
1080 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RUNSTOP
);
1082 /* Wait for the hardware to start */
1083 err
= xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1084 !(val
& XILINX_DMA_DMASR_HALTED
), 0,
1085 XILINX_DMA_LOOP_COUNT
);
1088 dev_err(chan
->dev
, "Cannot start channel %p: %x\n",
1089 chan
, dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
1096 * xilinx_vdma_start_transfer - Starts VDMA transfer
1097 * @chan: Driver specific channel struct pointer
1099 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan
*chan
)
1101 struct xilinx_vdma_config
*config
= &chan
->config
;
1102 struct xilinx_dma_tx_descriptor
*desc
, *tail_desc
;
1104 struct xilinx_vdma_tx_segment
*segment
, *last
= NULL
;
1107 /* This function was invoked with lock held */
1114 if (list_empty(&chan
->pending_list
))
1117 desc
= list_first_entry(&chan
->pending_list
,
1118 struct xilinx_dma_tx_descriptor
, node
);
1119 tail_desc
= list_last_entry(&chan
->pending_list
,
1120 struct xilinx_dma_tx_descriptor
, node
);
1122 /* Configure the hardware using info in the config structure */
1123 if (chan
->has_vflip
) {
1124 reg
= dma_read(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
);
1125 reg
&= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP
;
1126 reg
|= config
->vflip_en
;
1127 dma_write(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
,
1131 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1133 if (config
->frm_cnt_en
)
1134 reg
|= XILINX_DMA_DMACR_FRAMECNT_EN
;
1136 reg
&= ~XILINX_DMA_DMACR_FRAMECNT_EN
;
1138 /* If not parking, enable circular mode */
1140 reg
&= ~XILINX_DMA_DMACR_CIRC_EN
;
1142 reg
|= XILINX_DMA_DMACR_CIRC_EN
;
1144 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1146 j
= chan
->desc_submitcount
;
1147 reg
= dma_read(chan
, XILINX_DMA_REG_PARK_PTR
);
1148 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1149 reg
&= ~XILINX_DMA_PARK_PTR_RD_REF_MASK
;
1150 reg
|= j
<< XILINX_DMA_PARK_PTR_RD_REF_SHIFT
;
1152 reg
&= ~XILINX_DMA_PARK_PTR_WR_REF_MASK
;
1153 reg
|= j
<< XILINX_DMA_PARK_PTR_WR_REF_SHIFT
;
1155 dma_write(chan
, XILINX_DMA_REG_PARK_PTR
, reg
);
1157 /* Start the hardware */
1158 xilinx_dma_start(chan
);
1163 /* Start the transfer */
1164 if (chan
->desc_submitcount
< chan
->num_frms
)
1165 i
= chan
->desc_submitcount
;
1167 list_for_each_entry(segment
, &desc
->segments
, node
) {
1169 vdma_desc_write_64(chan
,
1170 XILINX_VDMA_REG_START_ADDRESS_64(i
++),
1171 segment
->hw
.buf_addr
,
1172 segment
->hw
.buf_addr_msb
);
1174 vdma_desc_write(chan
,
1175 XILINX_VDMA_REG_START_ADDRESS(i
++),
1176 segment
->hw
.buf_addr
);
1184 /* HW expects these parameters to be same for one transaction */
1185 vdma_desc_write(chan
, XILINX_DMA_REG_HSIZE
, last
->hw
.hsize
);
1186 vdma_desc_write(chan
, XILINX_DMA_REG_FRMDLY_STRIDE
,
1188 vdma_desc_write(chan
, XILINX_DMA_REG_VSIZE
, last
->hw
.vsize
);
1190 chan
->desc_submitcount
++;
1191 chan
->desc_pendingcount
--;
1192 list_del(&desc
->node
);
1193 list_add_tail(&desc
->node
, &chan
->active_list
);
1194 if (chan
->desc_submitcount
== chan
->num_frms
)
1195 chan
->desc_submitcount
= 0;
1201 * xilinx_cdma_start_transfer - Starts cdma transfer
1202 * @chan: Driver specific channel struct pointer
1204 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan
*chan
)
1206 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1207 struct xilinx_cdma_tx_segment
*tail_segment
;
1208 u32 ctrl_reg
= dma_read(chan
, XILINX_DMA_REG_DMACR
);
1216 if (list_empty(&chan
->pending_list
))
1219 head_desc
= list_first_entry(&chan
->pending_list
,
1220 struct xilinx_dma_tx_descriptor
, node
);
1221 tail_desc
= list_last_entry(&chan
->pending_list
,
1222 struct xilinx_dma_tx_descriptor
, node
);
1223 tail_segment
= list_last_entry(&tail_desc
->segments
,
1224 struct xilinx_cdma_tx_segment
, node
);
1226 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1227 ctrl_reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1228 ctrl_reg
|= chan
->desc_pendingcount
<<
1229 XILINX_DMA_CR_COALESCE_SHIFT
;
1230 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, ctrl_reg
);
1234 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
,
1235 XILINX_CDMA_CR_SGMODE
);
1237 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1238 XILINX_CDMA_CR_SGMODE
);
1240 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1241 head_desc
->async_tx
.phys
);
1243 /* Update tail ptr register which will start the transfer */
1244 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1245 tail_segment
->phys
);
1247 /* In simple mode */
1248 struct xilinx_cdma_tx_segment
*segment
;
1249 struct xilinx_cdma_desc_hw
*hw
;
1251 segment
= list_first_entry(&head_desc
->segments
,
1252 struct xilinx_cdma_tx_segment
,
1257 xilinx_write(chan
, XILINX_CDMA_REG_SRCADDR
,
1258 xilinx_prep_dma_addr_t(hw
->src_addr
));
1259 xilinx_write(chan
, XILINX_CDMA_REG_DSTADDR
,
1260 xilinx_prep_dma_addr_t(hw
->dest_addr
));
1262 /* Start the transfer */
1263 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1264 hw
->control
& chan
->xdev
->max_buffer_len
);
1267 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1268 chan
->desc_pendingcount
= 0;
1273 * xilinx_dma_start_transfer - Starts DMA transfer
1274 * @chan: Driver specific channel struct pointer
1276 static void xilinx_dma_start_transfer(struct xilinx_dma_chan
*chan
)
1278 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1279 struct xilinx_axidma_tx_segment
*tail_segment
;
1285 if (list_empty(&chan
->pending_list
))
1291 head_desc
= list_first_entry(&chan
->pending_list
,
1292 struct xilinx_dma_tx_descriptor
, node
);
1293 tail_desc
= list_last_entry(&chan
->pending_list
,
1294 struct xilinx_dma_tx_descriptor
, node
);
1295 tail_segment
= list_last_entry(&tail_desc
->segments
,
1296 struct xilinx_axidma_tx_segment
, node
);
1298 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1300 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1301 reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1302 reg
|= chan
->desc_pendingcount
<<
1303 XILINX_DMA_CR_COALESCE_SHIFT
;
1304 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1307 if (chan
->has_sg
&& !chan
->xdev
->mcdma
)
1308 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1309 head_desc
->async_tx
.phys
);
1311 if (chan
->has_sg
&& chan
->xdev
->mcdma
) {
1312 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1313 dma_ctrl_write(chan
, XILINX_DMA_REG_CURDESC
,
1314 head_desc
->async_tx
.phys
);
1317 dma_ctrl_write(chan
, XILINX_DMA_REG_CURDESC
,
1318 head_desc
->async_tx
.phys
);
1320 dma_ctrl_write(chan
,
1321 XILINX_DMA_MCRX_CDESC(chan
->tdest
),
1322 head_desc
->async_tx
.phys
);
1327 xilinx_dma_start(chan
);
1332 /* Start the transfer */
1333 if (chan
->has_sg
&& !chan
->xdev
->mcdma
) {
1335 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1336 chan
->cyclic_seg_v
->phys
);
1338 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1339 tail_segment
->phys
);
1340 } else if (chan
->has_sg
&& chan
->xdev
->mcdma
) {
1341 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1342 dma_ctrl_write(chan
, XILINX_DMA_REG_TAILDESC
,
1343 tail_segment
->phys
);
1346 dma_ctrl_write(chan
, XILINX_DMA_REG_TAILDESC
,
1347 tail_segment
->phys
);
1349 dma_ctrl_write(chan
,
1350 XILINX_DMA_MCRX_TDESC(chan
->tdest
),
1351 tail_segment
->phys
);
1355 struct xilinx_axidma_tx_segment
*segment
;
1356 struct xilinx_axidma_desc_hw
*hw
;
1358 segment
= list_first_entry(&head_desc
->segments
,
1359 struct xilinx_axidma_tx_segment
,
1363 xilinx_write(chan
, XILINX_DMA_REG_SRCDSTADDR
, hw
->buf_addr
);
1365 /* Start the transfer */
1366 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1367 hw
->control
& chan
->xdev
->max_buffer_len
);
1370 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1371 chan
->desc_pendingcount
= 0;
1376 * xilinx_dma_issue_pending - Issue pending transactions
1377 * @dchan: DMA channel
1379 static void xilinx_dma_issue_pending(struct dma_chan
*dchan
)
1381 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1382 unsigned long flags
;
1384 spin_lock_irqsave(&chan
->lock
, flags
);
1385 chan
->start_transfer(chan
);
1386 spin_unlock_irqrestore(&chan
->lock
, flags
);
1390 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1391 * @chan : xilinx DMA channel
1395 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan
*chan
)
1397 struct xilinx_dma_tx_descriptor
*desc
, *next
;
1399 /* This function was invoked with lock held */
1400 if (list_empty(&chan
->active_list
))
1403 list_for_each_entry_safe(desc
, next
, &chan
->active_list
, node
) {
1404 list_del(&desc
->node
);
1406 dma_cookie_complete(&desc
->async_tx
);
1407 list_add_tail(&desc
->node
, &chan
->done_list
);
1412 * xilinx_dma_reset - Reset DMA channel
1413 * @chan: Driver specific DMA channel
1415 * Return: '0' on success and failure value on error
1417 static int xilinx_dma_reset(struct xilinx_dma_chan
*chan
)
1422 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RESET
);
1424 /* Wait for the hardware to finish reset */
1425 err
= xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMACR
, tmp
,
1426 !(tmp
& XILINX_DMA_DMACR_RESET
), 0,
1427 XILINX_DMA_LOOP_COUNT
);
1430 dev_err(chan
->dev
, "reset timeout, cr %x, sr %x\n",
1431 dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
),
1432 dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
1438 chan
->desc_submitcount
= 0;
1444 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1445 * @chan: Driver specific DMA channel
1447 * Return: '0' on success and failure value on error
1449 static int xilinx_dma_chan_reset(struct xilinx_dma_chan
*chan
)
1454 err
= xilinx_dma_reset(chan
);
1458 /* Enable interrupts */
1459 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1460 XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1466 * xilinx_dma_irq_handler - DMA Interrupt handler
1468 * @data: Pointer to the Xilinx DMA channel structure
1470 * Return: IRQ_HANDLED/IRQ_NONE
1472 static irqreturn_t
xilinx_dma_irq_handler(int irq
, void *data
)
1474 struct xilinx_dma_chan
*chan
= data
;
1477 /* Read the status and ack the interrupts. */
1478 status
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
);
1479 if (!(status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
))
1482 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1483 status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1485 if (status
& XILINX_DMA_DMASR_ERR_IRQ
) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write 1 to any other error bits.
		 */
1493 u32 errors
= status
& XILINX_DMA_DMASR_ALL_ERR_MASK
;
1495 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1496 errors
& XILINX_DMA_DMASR_ERR_RECOVER_MASK
);
1498 if (!chan
->flush_on_fsync
||
1499 (errors
& ~XILINX_DMA_DMASR_ERR_RECOVER_MASK
)) {
1501 "Channel %p has errors %x, cdr %x tdr %x\n",
1503 dma_ctrl_read(chan
, XILINX_DMA_REG_CURDESC
),
1504 dma_ctrl_read(chan
, XILINX_DMA_REG_TAILDESC
));
1509 if (status
& XILINX_DMA_DMASR_DLY_CNT_IRQ
) {
1511 * Device takes too long to do the transfer when user requires
1514 dev_dbg(chan
->dev
, "Inter-packet latency too long\n");
1517 if (status
& XILINX_DMA_DMASR_FRM_CNT_IRQ
) {
1518 spin_lock(&chan
->lock
);
1519 xilinx_dma_complete_descriptor(chan
);
1521 chan
->start_transfer(chan
);
1522 spin_unlock(&chan
->lock
);
1525 tasklet_schedule(&chan
->tasklet
);
1530 * append_desc_queue - Queuing descriptor
1531 * @chan: Driver specific dma channel
1532 * @desc: dma transaction descriptor
1534 static void append_desc_queue(struct xilinx_dma_chan
*chan
,
1535 struct xilinx_dma_tx_descriptor
*desc
)
1537 struct xilinx_vdma_tx_segment
*tail_segment
;
1538 struct xilinx_dma_tx_descriptor
*tail_desc
;
1539 struct xilinx_axidma_tx_segment
*axidma_tail_segment
;
1540 struct xilinx_cdma_tx_segment
*cdma_tail_segment
;
1542 if (list_empty(&chan
->pending_list
))
1546 * Add the hardware descriptor to the chain of hardware descriptors
1547 * that already exists in memory.
1549 tail_desc
= list_last_entry(&chan
->pending_list
,
1550 struct xilinx_dma_tx_descriptor
, node
);
1551 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
) {
1552 tail_segment
= list_last_entry(&tail_desc
->segments
,
1553 struct xilinx_vdma_tx_segment
,
1555 tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1556 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
1557 cdma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1558 struct xilinx_cdma_tx_segment
,
1560 cdma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1562 axidma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1563 struct xilinx_axidma_tx_segment
,
1565 axidma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1569 * Add the software descriptor and all children to the list
1570 * of pending transactions
1573 list_add_tail(&desc
->node
, &chan
->pending_list
);
1574 chan
->desc_pendingcount
++;
1576 if (chan
->has_sg
&& (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
)
1577 && unlikely(chan
->desc_pendingcount
> chan
->num_frms
)) {
1578 dev_dbg(chan
->dev
, "desc pendingcount is too high\n");
1579 chan
->desc_pendingcount
= chan
->num_frms
;
1584 * xilinx_dma_tx_submit - Submit DMA transaction
1585 * @tx: Async transaction descriptor
1587 * Return: cookie value on success and failure value on error
1589 static dma_cookie_t
xilinx_dma_tx_submit(struct dma_async_tx_descriptor
*tx
)
1591 struct xilinx_dma_tx_descriptor
*desc
= to_dma_tx_descriptor(tx
);
1592 struct xilinx_dma_chan
*chan
= to_xilinx_chan(tx
->chan
);
1593 dma_cookie_t cookie
;
1594 unsigned long flags
;
1598 xilinx_dma_free_tx_descriptor(chan
, desc
);
1604 * If reset fails, need to hard reset the system.
1605 * Channel is no longer functional
1607 err
= xilinx_dma_chan_reset(chan
);
1612 spin_lock_irqsave(&chan
->lock
, flags
);
1614 cookie
= dma_cookie_assign(tx
);
1616 /* Put this transaction onto the tail of the pending queue */
1617 append_desc_queue(chan
, desc
);
1620 chan
->cyclic
= true;
1622 spin_unlock_irqrestore(&chan
->lock
, flags
);
1628 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1629 * DMA_SLAVE transaction
1630 * @dchan: DMA channel
1631 * @xt: Interleaved template pointer
1632 * @flags: transfer ack flags
1634 * Return: Async transaction descriptor on success and NULL on failure
1636 static struct dma_async_tx_descriptor
*
1637 xilinx_vdma_dma_prep_interleaved(struct dma_chan
*dchan
,
1638 struct dma_interleaved_template
*xt
,
1639 unsigned long flags
)
1641 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1642 struct xilinx_dma_tx_descriptor
*desc
;
1643 struct xilinx_vdma_tx_segment
*segment
;
1644 struct xilinx_vdma_desc_hw
*hw
;
1646 if (!is_slave_direction(xt
->dir
))
1649 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1652 if (xt
->frame_size
!= 1)
1655 /* Allocate a transaction descriptor. */
1656 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1660 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1661 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1662 async_tx_ack(&desc
->async_tx
);
1664 /* Allocate the link descriptor from DMA pool */
1665 segment
= xilinx_vdma_alloc_tx_segment(chan
);
1669 /* Fill in the hardware descriptor */
1671 hw
->vsize
= xt
->numf
;
1672 hw
->hsize
= xt
->sgl
[0].size
;
1673 hw
->stride
= (xt
->sgl
[0].icg
+ xt
->sgl
[0].size
) <<
1674 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT
;
1675 hw
->stride
|= chan
->config
.frm_dly
<<
1676 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT
;
1678 if (xt
->dir
!= DMA_MEM_TO_DEV
) {
1679 if (chan
->ext_addr
) {
1680 hw
->buf_addr
= lower_32_bits(xt
->dst_start
);
1681 hw
->buf_addr_msb
= upper_32_bits(xt
->dst_start
);
1683 hw
->buf_addr
= xt
->dst_start
;
1686 if (chan
->ext_addr
) {
1687 hw
->buf_addr
= lower_32_bits(xt
->src_start
);
1688 hw
->buf_addr_msb
= upper_32_bits(xt
->src_start
);
1690 hw
->buf_addr
= xt
->src_start
;
1694 /* Insert the segment into the descriptor segments list. */
1695 list_add_tail(&segment
->node
, &desc
->segments
);
1697 /* Link the last hardware descriptor with the first. */
1698 segment
= list_first_entry(&desc
->segments
,
1699 struct xilinx_vdma_tx_segment
, node
);
1700 desc
->async_tx
.phys
= segment
->phys
;
1702 return &desc
->async_tx
;
1705 xilinx_dma_free_tx_descriptor(chan
, desc
);
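/*
 * Illustrative template (values are assumptions, not part of this driver):
 * a client describing one 1920x1080 RGB565 frame to the VDMA interleaved
 * prep callback above could fill the generic dma_interleaved_template
 * roughly as follows, with stride being the line pitch in bytes.
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_phys;
 *	xt->numf = 1080;			(vsize: number of lines)
 *	xt->frame_size = 1;			(single chunk per line)
 *	xt->sgl[0].size = 1920 * 2;		(hsize in bytes)
 *	xt->sgl[0].icg = stride - (1920 * 2);
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */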
1710 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1711 * @dchan: DMA channel
1712 * @dma_dst: destination address
1713 * @dma_src: source address
1714 * @len: transfer length
1715 * @flags: transfer ack flags
1717 * Return: Async transaction descriptor on success and NULL on failure
1719 static struct dma_async_tx_descriptor
*
1720 xilinx_cdma_prep_memcpy(struct dma_chan
*dchan
, dma_addr_t dma_dst
,
1721 dma_addr_t dma_src
, size_t len
, unsigned long flags
)
1723 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1724 struct xilinx_dma_tx_descriptor
*desc
;
1725 struct xilinx_cdma_tx_segment
*segment
;
1726 struct xilinx_cdma_desc_hw
*hw
;
1728 if (!len
|| len
> chan
->xdev
->max_buffer_len
)
1731 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1735 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1736 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1738 /* Allocate the link descriptor from DMA pool */
1739 segment
= xilinx_cdma_alloc_tx_segment(chan
);
1745 hw
->src_addr
= dma_src
;
1746 hw
->dest_addr
= dma_dst
;
1747 if (chan
->ext_addr
) {
1748 hw
->src_addr_msb
= upper_32_bits(dma_src
);
1749 hw
->dest_addr_msb
= upper_32_bits(dma_dst
);
1752 /* Insert the segment into the descriptor segments list. */
1753 list_add_tail(&segment
->node
, &desc
->segments
);
1755 desc
->async_tx
.phys
= segment
->phys
;
1756 hw
->next_desc
= segment
->phys
;
1758 return &desc
->async_tx
;
1761 xilinx_dma_free_tx_descriptor(chan
, desc
);
1766 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1767 * @dchan: DMA channel
1768 * @sgl: scatterlist to transfer to/from
1769 * @sg_len: number of entries in @scatterlist
1770 * @direction: DMA direction
1771 * @flags: transfer ack flags
1772 * @context: APP words of the descriptor
1774 * Return: Async transaction descriptor on success and NULL on failure
1776 static struct dma_async_tx_descriptor
*xilinx_dma_prep_slave_sg(
1777 struct dma_chan
*dchan
, struct scatterlist
*sgl
, unsigned int sg_len
,
1778 enum dma_transfer_direction direction
, unsigned long flags
,
1781 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1782 struct xilinx_dma_tx_descriptor
*desc
;
1783 struct xilinx_axidma_tx_segment
*segment
= NULL
;
1784 u32
*app_w
= (u32
*)context
;
1785 struct scatterlist
*sg
;
1790 if (!is_slave_direction(direction
))
1793 /* Allocate a transaction descriptor. */
1794 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1798 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1799 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1801 /* Build transactions using information in the scatter gather list */
1802 for_each_sg(sgl
, sg
, sg_len
, i
) {
1805 /* Loop until the entire scatterlist entry is used */
1806 while (sg_used
< sg_dma_len(sg
)) {
1807 struct xilinx_axidma_desc_hw
*hw
;
1809 /* Get a free segment */
1810 segment
= xilinx_axidma_alloc_tx_segment(chan
);
1815 * Calculate the maximum number of bytes to transfer,
1816 * making sure it is less than the hw limit
1818 copy
= xilinx_dma_calc_copysize(chan
, sg_dma_len(sg
),
1822 /* Fill in the descriptor */
1823 xilinx_axidma_buf(chan
, hw
, sg_dma_address(sg
),
1828 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1830 memcpy(hw
->app
, app_w
, sizeof(u32
) *
1831 XILINX_DMA_NUM_APP_WORDS
);
1837 * Insert the segment into the descriptor segments
1840 list_add_tail(&segment
->node
, &desc
->segments
);
1844 segment
= list_first_entry(&desc
->segments
,
1845 struct xilinx_axidma_tx_segment
, node
);
1846 desc
->async_tx
.phys
= segment
->phys
;
1848 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1849 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1850 segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
1851 segment
= list_last_entry(&desc
->segments
,
1852 struct xilinx_axidma_tx_segment
,
1854 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
1857 return &desc
->async_tx
;
1860 xilinx_dma_free_tx_descriptor(chan
, desc
);
1865 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
1866 * @dchan: DMA channel
1867 * @buf_addr: Physical address of the buffer
1868 * @buf_len: Total length of the cyclic buffers
1869 * @period_len: length of individual cyclic buffer
1870 * @direction: DMA direction
1871 * @flags: transfer ack flags
1873 * Return: Async transaction descriptor on success and NULL on failure
1875 static struct dma_async_tx_descriptor
*xilinx_dma_prep_dma_cyclic(
1876 struct dma_chan
*dchan
, dma_addr_t buf_addr
, size_t buf_len
,
1877 size_t period_len
, enum dma_transfer_direction direction
,
1878 unsigned long flags
)
1880 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1881 struct xilinx_dma_tx_descriptor
*desc
;
1882 struct xilinx_axidma_tx_segment
*segment
, *head_segment
, *prev
= NULL
;
1883 size_t copy
, sg_used
;
1884 unsigned int num_periods
;
1891 num_periods
= buf_len
/ period_len
;
1896 if (!is_slave_direction(direction
))
1899 /* Allocate a transaction descriptor. */
1900 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1904 chan
->direction
= direction
;
1905 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1906 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1908 for (i
= 0; i
< num_periods
; ++i
) {
1911 while (sg_used
< period_len
) {
1912 struct xilinx_axidma_desc_hw
*hw
;
1914 /* Get a free segment */
1915 segment
= xilinx_axidma_alloc_tx_segment(chan
);
1920 * Calculate the maximum number of bytes to transfer,
1921 * making sure it is less than the hw limit
1923 copy
= xilinx_dma_calc_copysize(chan
, period_len
,
1926 xilinx_axidma_buf(chan
, hw
, buf_addr
, sg_used
,
1931 prev
->hw
.next_desc
= segment
->phys
;
1937 * Insert the segment into the descriptor segments
1940 list_add_tail(&segment
->node
, &desc
->segments
);
1944 head_segment
= list_first_entry(&desc
->segments
,
1945 struct xilinx_axidma_tx_segment
, node
);
1946 desc
->async_tx
.phys
= head_segment
->phys
;
1948 desc
->cyclic
= true;
1949 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1950 reg
|= XILINX_DMA_CR_CYCLIC_BD_EN_MASK
;
1951 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1953 segment
= list_last_entry(&desc
->segments
,
1954 struct xilinx_axidma_tx_segment
,
1956 segment
->hw
.next_desc
= (u32
) head_segment
->phys
;
1958 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1959 if (direction
== DMA_MEM_TO_DEV
) {
1960 head_segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
1961 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
1964 return &desc
->async_tx
;
1967 xilinx_dma_free_tx_descriptor(chan
, desc
);
1972 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1973 * DMA_SLAVE transaction
1974 * @dchan: DMA channel
1975 * @xt: Interleaved template pointer
1976 * @flags: transfer ack flags
1978 * Return: Async transaction descriptor on success and NULL on failure
1980 static struct dma_async_tx_descriptor
*
1981 xilinx_dma_prep_interleaved(struct dma_chan
*dchan
,
1982 struct dma_interleaved_template
*xt
,
1983 unsigned long flags
)
1985 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1986 struct xilinx_dma_tx_descriptor
*desc
;
1987 struct xilinx_axidma_tx_segment
*segment
;
1988 struct xilinx_axidma_desc_hw
*hw
;
1990 if (!is_slave_direction(xt
->dir
))
1993 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1996 if (xt
->frame_size
!= 1)
1999 /* Allocate a transaction descriptor. */
2000 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
2004 chan
->direction
= xt
->dir
;
2005 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
2006 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
2008 /* Get a free segment */
2009 segment
= xilinx_axidma_alloc_tx_segment(chan
);
2015 /* Fill in the descriptor */
2016 if (xt
->dir
!= DMA_MEM_TO_DEV
)
2017 hw
->buf_addr
= xt
->dst_start
;
2019 hw
->buf_addr
= xt
->src_start
;
2021 hw
->mcdma_control
= chan
->tdest
& XILINX_DMA_BD_TDEST_MASK
;
2022 hw
->vsize_stride
= (xt
->numf
<< XILINX_DMA_BD_VSIZE_SHIFT
) &
2023 XILINX_DMA_BD_VSIZE_MASK
;
2024 hw
->vsize_stride
|= (xt
->sgl
[0].icg
+ xt
->sgl
[0].size
) &
2025 XILINX_DMA_BD_STRIDE_MASK
;
2026 hw
->control
= xt
->sgl
[0].size
& XILINX_DMA_BD_HSIZE_MASK
;
2029 * Insert the segment into the descriptor segments
2032 list_add_tail(&segment
->node
, &desc
->segments
);
2035 segment
= list_first_entry(&desc
->segments
,
2036 struct xilinx_axidma_tx_segment
, node
);
2037 desc
->async_tx
.phys
= segment
->phys
;
2039 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2040 if (xt
->dir
== DMA_MEM_TO_DEV
) {
2041 segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
2042 segment
= list_last_entry(&desc
->segments
,
2043 struct xilinx_axidma_tx_segment
,
2045 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
2048 return &desc
->async_tx
;
2051 xilinx_dma_free_tx_descriptor(chan
, desc
);
2056 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2057 * @dchan: Driver specific DMA Channel pointer
2059 * Return: '0' always.
2061 static int xilinx_dma_terminate_all(struct dma_chan
*dchan
)
2063 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
2068 xilinx_dma_chan_reset(chan
);
2070 err
= chan
->stop_transfer(chan
);
2072 dev_err(chan
->dev
, "Cannot stop channel %p: %x\n",
2073 chan
, dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
2077 /* Remove and free all of the descriptors in the lists */
2078 xilinx_dma_free_descriptors(chan
);
2082 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
2083 reg
&= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK
;
2084 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
2085 chan
->cyclic
= false;
2088 if ((chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) && chan
->has_sg
)
2089 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
,
2090 XILINX_CDMA_CR_SGMODE
);
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
2108 int xilinx_vdma_channel_set_config(struct dma_chan
*dchan
,
2109 struct xilinx_vdma_config
*cfg
)
2111 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
2115 return xilinx_dma_chan_reset(chan
);
2117 dmacr
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
2119 chan
->config
.frm_dly
= cfg
->frm_dly
;
2120 chan
->config
.park
= cfg
->park
;
2122 /* genlock settings */
2123 chan
->config
.gen_lock
= cfg
->gen_lock
;
2124 chan
->config
.master
= cfg
->master
;
2126 if (cfg
->gen_lock
&& chan
->genlock
) {
2127 dmacr
|= XILINX_DMA_DMACR_GENLOCK_EN
;
2128 dmacr
|= cfg
->master
<< XILINX_DMA_DMACR_MASTER_SHIFT
;
2131 chan
->config
.frm_cnt_en
= cfg
->frm_cnt_en
;
2132 chan
->config
.vflip_en
= cfg
->vflip_en
;
2135 chan
->config
.park_frm
= cfg
->park_frm
;
2137 chan
->config
.park_frm
= -1;
2139 chan
->config
.coalesc
= cfg
->coalesc
;
2140 chan
->config
.delay
= cfg
->delay
;
2142 if (cfg
->coalesc
<= XILINX_DMA_DMACR_FRAME_COUNT_MAX
) {
2143 dmacr
|= cfg
->coalesc
<< XILINX_DMA_DMACR_FRAME_COUNT_SHIFT
;
2144 chan
->config
.coalesc
= cfg
->coalesc
;
2147 if (cfg
->delay
<= XILINX_DMA_DMACR_DELAY_MAX
) {
2148 dmacr
|= cfg
->delay
<< XILINX_DMA_DMACR_DELAY_SHIFT
;
2149 chan
->config
.delay
= cfg
->delay
;
2152 /* FSync Source selection */
2153 dmacr
&= ~XILINX_DMA_DMACR_FSYNCSRC_MASK
;
2154 dmacr
|= cfg
->ext_fsync
<< XILINX_DMA_DMACR_FSYNCSRC_SHIFT
;
2156 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, dmacr
);
2160 EXPORT_SYMBOL(xilinx_vdma_channel_set_config
);
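
/*
 * Illustrative sketch, not part of the driver: a hypothetical VDMA client
 * using the exported helper above to enable frame-count interrupts with a
 * coalescing threshold. The field values are examples only; see
 * include/linux/dma/xilinx_dma.h for the full set of options.
 */
static int __maybe_unused example_vdma_configure(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_cnt_en = 1,	/* interrupt on frame-count threshold */
		.coalesc = 4,		/* raise an IRQ every 4 frames */
		.delay = 0,		/* no inter-packet delay interrupt */
		.park = 0,		/* circular mode, not parked */
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}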
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted while the DMA engine is in progress. It avoids
	 * polling for a bit in the status register to know the DMA
	 * state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* check if SG is enabled (only for AXIDMA and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
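
/*
 * Illustrative sketch, not part of the driver: how a hypothetical client
 * bound by a device-tree entry such as "dmas = <&axi_dma_0 0>;
 * dma-names = "rx";" would obtain the channel that of_dma_xilinx_xlate()
 * resolves from the single cell in the DMA specifier. The "rx" name is an
 * example only, not mandated by this driver.
 */
static __maybe_unused struct dma_chan *example_request_channel(struct device *dev)
{
	/* Returns an ERR_PTR() on failure, so callers should use IS_ERR(). */
	return dma_request_chan(dev, "rx");
}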
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");