/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
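
/*
 * Usage sketch (an illustration added for clarity, not part of the original
 * driver): a client driver typically obtains one of these channels through
 * the generic dmaengine API and then applies the VDMA-specific run-time
 * configuration exported by this file. The device pointer and channel name
 * below are assumptions for the example only.
 *
 *	#include <linux/dmaengine.h>
 *	#include <linux/dma/xilinx_dma.h>
 *
 *	struct dma_chan *chan;
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,	// interrupt after a frame count
 *		.coalesc = 1,		// frames per interrupt
 *	};
 *
 *	chan = dma_request_chan(dev, "vdma0");	// "vdma0" is illustrative
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *	// ...then build an interleaved template and submit it through
 *	// dmaengine_prep_interleaved_dma()/dmaengine_submit()...
 *
 * All descriptor preparation, submission and start paths implemented below
 * are reached through these standard dmaengine entry points.
 */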
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)
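
/*
 * Note (added for clarity): the DMACR mask/shift pairs above are used in
 * read-modify-write sequences on the control register, for example to
 * program the interrupt-coalescing frame count the way
 * xilinx_vdma_channel_set_config() does later in this file ("coalesc" is a
 * caller-supplied value bounded by XILINX_DMA_DMACR_FRAME_COUNT_MAX):
 *
 *	u32 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 *
 *	dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
 *	dmacr |= coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
 */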
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the vdma driver is writing to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), the value is written as two separate
 * 32-bit writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */
/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}
/* Required functions */
/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}
863 * xilinx_dma_alloc_chan_resources - Allocate channel resources
864 * @dchan: DMA channel
866 * Return: '0' on success and failure value on error
868 static int xilinx_dma_alloc_chan_resources(struct dma_chan
*dchan
)
870 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
873 /* Has this channel already been allocated? */
878 * We need the descriptor to be aligned to 64bytes
879 * for meeting Xilinx VDMA specification requirement.
881 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
882 /* Allocate the buffer descriptors. */
883 chan
->seg_v
= dma_zalloc_coherent(chan
->dev
,
884 sizeof(*chan
->seg_v
) *
885 XILINX_DMA_NUM_DESCS
,
886 &chan
->seg_p
, GFP_KERNEL
);
889 "unable to allocate channel %d descriptors\n",
894 for (i
= 0; i
< XILINX_DMA_NUM_DESCS
; i
++) {
895 chan
->seg_v
[i
].hw
.next_desc
=
896 lower_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
897 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
898 chan
->seg_v
[i
].hw
.next_desc_msb
=
899 upper_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
900 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
901 chan
->seg_v
[i
].phys
= chan
->seg_p
+
902 sizeof(*chan
->seg_v
) * i
;
903 list_add_tail(&chan
->seg_v
[i
].node
,
904 &chan
->free_seg_list
);
906 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
907 chan
->desc_pool
= dma_pool_create("xilinx_cdma_desc_pool",
909 sizeof(struct xilinx_cdma_tx_segment
),
910 __alignof__(struct xilinx_cdma_tx_segment
),
913 chan
->desc_pool
= dma_pool_create("xilinx_vdma_desc_pool",
915 sizeof(struct xilinx_vdma_tx_segment
),
916 __alignof__(struct xilinx_vdma_tx_segment
),
920 if (!chan
->desc_pool
&&
921 (chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIDMA
)) {
923 "unable to allocate channel %d descriptor pool\n",
928 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
930 * For cyclic DMA mode we need to program the tail Descriptor
931 * register with a value which is not a part of the BD chain
932 * so allocating a desc segment during channel allocation for
933 * programming tail descriptor.
935 chan
->cyclic_seg_v
= dma_zalloc_coherent(chan
->dev
,
936 sizeof(*chan
->cyclic_seg_v
),
937 &chan
->cyclic_seg_p
, GFP_KERNEL
);
938 if (!chan
->cyclic_seg_v
) {
940 "unable to allocate desc segment for cyclic DMA\n");
943 chan
->cyclic_seg_v
->phys
= chan
->cyclic_seg_p
;
946 dma_cookie_init(dchan
);
948 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
949 /* For AXI DMA resetting once channel will reset the
950 * other channel as well so enable the interrupts here.
952 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
953 XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
956 if ((chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) && chan
->has_sg
)
957 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
958 XILINX_CDMA_CR_SGMODE
);
964 * xilinx_dma_tx_status - Get DMA transaction status
965 * @dchan: DMA channel
966 * @cookie: Transaction identifier
967 * @txstate: Transaction state
969 * Return: DMA transaction status
971 static enum dma_status
xilinx_dma_tx_status(struct dma_chan
*dchan
,
973 struct dma_tx_state
*txstate
)
975 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
976 struct xilinx_dma_tx_descriptor
*desc
;
977 struct xilinx_axidma_tx_segment
*segment
;
978 struct xilinx_axidma_desc_hw
*hw
;
983 ret
= dma_cookie_status(dchan
, cookie
, txstate
);
984 if (ret
== DMA_COMPLETE
|| !txstate
)
987 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
988 spin_lock_irqsave(&chan
->lock
, flags
);
990 desc
= list_last_entry(&chan
->active_list
,
991 struct xilinx_dma_tx_descriptor
, node
);
993 list_for_each_entry(segment
, &desc
->segments
, node
) {
995 residue
+= (hw
->control
- hw
->status
) &
996 XILINX_DMA_MAX_TRANS_LEN
;
999 spin_unlock_irqrestore(&chan
->lock
, flags
);
1001 chan
->residue
= residue
;
1002 dma_set_residue(txstate
, chan
->residue
);
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}
/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}
1066 * xilinx_vdma_start_transfer - Starts VDMA transfer
1067 * @chan: Driver specific channel struct pointer
1069 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan
*chan
)
1071 struct xilinx_vdma_config
*config
= &chan
->config
;
1072 struct xilinx_dma_tx_descriptor
*desc
, *tail_desc
;
1074 struct xilinx_vdma_tx_segment
*tail_segment
;
1076 /* This function was invoked with lock held */
1083 if (list_empty(&chan
->pending_list
))
1086 desc
= list_first_entry(&chan
->pending_list
,
1087 struct xilinx_dma_tx_descriptor
, node
);
1088 tail_desc
= list_last_entry(&chan
->pending_list
,
1089 struct xilinx_dma_tx_descriptor
, node
);
1091 tail_segment
= list_last_entry(&tail_desc
->segments
,
1092 struct xilinx_vdma_tx_segment
, node
);
1095 * If hardware is idle, then all descriptors on the running lists are
1096 * done, start new transfers
1099 dma_ctrl_write(chan
, XILINX_DMA_REG_CURDESC
,
1100 desc
->async_tx
.phys
);
1102 /* Configure the hardware using info in the config structure */
1103 if (chan
->has_vflip
) {
1104 reg
= dma_read(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
);
1105 reg
&= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP
;
1106 reg
|= config
->vflip_en
;
1107 dma_write(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
,
1111 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1113 if (config
->frm_cnt_en
)
1114 reg
|= XILINX_DMA_DMACR_FRAMECNT_EN
;
1116 reg
&= ~XILINX_DMA_DMACR_FRAMECNT_EN
;
1119 * With SG, start with circular mode, so that BDs can be fetched.
1120 * In direct register mode, if not parking, enable circular mode
1122 if (chan
->has_sg
|| !config
->park
)
1123 reg
|= XILINX_DMA_DMACR_CIRC_EN
;
1126 reg
&= ~XILINX_DMA_DMACR_CIRC_EN
;
1128 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1130 j
= chan
->desc_submitcount
;
1131 reg
= dma_read(chan
, XILINX_DMA_REG_PARK_PTR
);
1132 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1133 reg
&= ~XILINX_DMA_PARK_PTR_RD_REF_MASK
;
1134 reg
|= j
<< XILINX_DMA_PARK_PTR_RD_REF_SHIFT
;
1136 reg
&= ~XILINX_DMA_PARK_PTR_WR_REF_MASK
;
1137 reg
|= j
<< XILINX_DMA_PARK_PTR_WR_REF_SHIFT
;
1139 dma_write(chan
, XILINX_DMA_REG_PARK_PTR
, reg
);
1141 /* Start the hardware */
1142 xilinx_dma_start(chan
);
1147 /* Start the transfer */
1149 dma_ctrl_write(chan
, XILINX_DMA_REG_TAILDESC
,
1150 tail_segment
->phys
);
1151 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1152 chan
->desc_pendingcount
= 0;
1154 struct xilinx_vdma_tx_segment
*segment
, *last
= NULL
;
1157 if (chan
->desc_submitcount
< chan
->num_frms
)
1158 i
= chan
->desc_submitcount
;
1160 list_for_each_entry(segment
, &desc
->segments
, node
) {
1162 vdma_desc_write_64(chan
,
1163 XILINX_VDMA_REG_START_ADDRESS_64(i
++),
1164 segment
->hw
.buf_addr
,
1165 segment
->hw
.buf_addr_msb
);
1167 vdma_desc_write(chan
,
1168 XILINX_VDMA_REG_START_ADDRESS(i
++),
1169 segment
->hw
.buf_addr
);
1177 /* HW expects these parameters to be same for one transaction */
1178 vdma_desc_write(chan
, XILINX_DMA_REG_HSIZE
, last
->hw
.hsize
);
1179 vdma_desc_write(chan
, XILINX_DMA_REG_FRMDLY_STRIDE
,
1181 vdma_desc_write(chan
, XILINX_DMA_REG_VSIZE
, last
->hw
.vsize
);
1183 chan
->desc_submitcount
++;
1184 chan
->desc_pendingcount
--;
1185 list_del(&desc
->node
);
1186 list_add_tail(&desc
->node
, &chan
->active_list
);
1187 if (chan
->desc_submitcount
== chan
->num_frms
)
1188 chan
->desc_submitcount
= 0;
1195 * xilinx_cdma_start_transfer - Starts cdma transfer
1196 * @chan: Driver specific channel struct pointer
1198 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan
*chan
)
1200 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1201 struct xilinx_cdma_tx_segment
*tail_segment
;
1202 u32 ctrl_reg
= dma_read(chan
, XILINX_DMA_REG_DMACR
);
1210 if (list_empty(&chan
->pending_list
))
1213 head_desc
= list_first_entry(&chan
->pending_list
,
1214 struct xilinx_dma_tx_descriptor
, node
);
1215 tail_desc
= list_last_entry(&chan
->pending_list
,
1216 struct xilinx_dma_tx_descriptor
, node
);
1217 tail_segment
= list_last_entry(&tail_desc
->segments
,
1218 struct xilinx_cdma_tx_segment
, node
);
1220 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1221 ctrl_reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1222 ctrl_reg
|= chan
->desc_pendingcount
<<
1223 XILINX_DMA_CR_COALESCE_SHIFT
;
1224 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, ctrl_reg
);
1228 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
,
1229 XILINX_CDMA_CR_SGMODE
);
1231 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1232 XILINX_CDMA_CR_SGMODE
);
1234 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1235 head_desc
->async_tx
.phys
);
1237 /* Update tail ptr register which will start the transfer */
1238 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1239 tail_segment
->phys
);
1241 /* In simple mode */
1242 struct xilinx_cdma_tx_segment
*segment
;
1243 struct xilinx_cdma_desc_hw
*hw
;
1245 segment
= list_first_entry(&head_desc
->segments
,
1246 struct xilinx_cdma_tx_segment
,
1251 xilinx_write(chan
, XILINX_CDMA_REG_SRCADDR
, hw
->src_addr
);
1252 xilinx_write(chan
, XILINX_CDMA_REG_DSTADDR
, hw
->dest_addr
);
1254 /* Start the transfer */
1255 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1256 hw
->control
& XILINX_DMA_MAX_TRANS_LEN
);
1259 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1260 chan
->desc_pendingcount
= 0;
1265 * xilinx_dma_start_transfer - Starts DMA transfer
1266 * @chan: Driver specific channel struct pointer
1268 static void xilinx_dma_start_transfer(struct xilinx_dma_chan
*chan
)
1270 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1271 struct xilinx_axidma_tx_segment
*tail_segment
;
1277 if (list_empty(&chan
->pending_list
))
1283 head_desc
= list_first_entry(&chan
->pending_list
,
1284 struct xilinx_dma_tx_descriptor
, node
);
1285 tail_desc
= list_last_entry(&chan
->pending_list
,
1286 struct xilinx_dma_tx_descriptor
, node
);
1287 tail_segment
= list_last_entry(&tail_desc
->segments
,
1288 struct xilinx_axidma_tx_segment
, node
);
1290 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1292 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1293 reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1294 reg
|= chan
->desc_pendingcount
<<
1295 XILINX_DMA_CR_COALESCE_SHIFT
;
1296 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1299 if (chan
->has_sg
&& !chan
->xdev
->mcdma
)
1300 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1301 head_desc
->async_tx
.phys
);
1303 if (chan
->has_sg
&& chan
->xdev
->mcdma
) {
1304 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1305 dma_ctrl_write(chan
, XILINX_DMA_REG_CURDESC
,
1306 head_desc
->async_tx
.phys
);
1309 dma_ctrl_write(chan
, XILINX_DMA_REG_CURDESC
,
1310 head_desc
->async_tx
.phys
);
1312 dma_ctrl_write(chan
,
1313 XILINX_DMA_MCRX_CDESC(chan
->tdest
),
1314 head_desc
->async_tx
.phys
);
1319 xilinx_dma_start(chan
);
1324 /* Start the transfer */
1325 if (chan
->has_sg
&& !chan
->xdev
->mcdma
) {
1327 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1328 chan
->cyclic_seg_v
->phys
);
1330 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1331 tail_segment
->phys
);
1332 } else if (chan
->has_sg
&& chan
->xdev
->mcdma
) {
1333 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1334 dma_ctrl_write(chan
, XILINX_DMA_REG_TAILDESC
,
1335 tail_segment
->phys
);
1338 dma_ctrl_write(chan
, XILINX_DMA_REG_TAILDESC
,
1339 tail_segment
->phys
);
1341 dma_ctrl_write(chan
,
1342 XILINX_DMA_MCRX_TDESC(chan
->tdest
),
1343 tail_segment
->phys
);
1347 struct xilinx_axidma_tx_segment
*segment
;
1348 struct xilinx_axidma_desc_hw
*hw
;
1350 segment
= list_first_entry(&head_desc
->segments
,
1351 struct xilinx_axidma_tx_segment
,
1355 xilinx_write(chan
, XILINX_DMA_REG_SRCDSTADDR
, hw
->buf_addr
);
1357 /* Start the transfer */
1358 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1359 hw
->control
& XILINX_DMA_MAX_TRANS_LEN
);
1362 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1363 chan
->desc_pendingcount
= 0;
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_pendingcount = 0;
	chan->desc_submitcount = 0;

	return err;
}
/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}
1459 * xilinx_dma_irq_handler - DMA Interrupt handler
1461 * @data: Pointer to the Xilinx DMA channel structure
1463 * Return: IRQ_HANDLED/IRQ_NONE
1465 static irqreturn_t
xilinx_dma_irq_handler(int irq
, void *data
)
1467 struct xilinx_dma_chan
*chan
= data
;
1470 /* Read the status and ack the interrupts. */
1471 status
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
);
1472 if (!(status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
))
1475 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1476 status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1478 if (status
& XILINX_DMA_DMASR_ERR_IRQ
) {
1480 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1481 * error is recoverable, ignore it. Otherwise flag the error.
1483 * Only recoverable errors can be cleared in the DMASR register,
1484 * make sure not to write to other error bits to 1.
1486 u32 errors
= status
& XILINX_DMA_DMASR_ALL_ERR_MASK
;
1488 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1489 errors
& XILINX_DMA_DMASR_ERR_RECOVER_MASK
);
1491 if (!chan
->flush_on_fsync
||
1492 (errors
& ~XILINX_DMA_DMASR_ERR_RECOVER_MASK
)) {
1494 "Channel %p has errors %x, cdr %x tdr %x\n",
1496 dma_ctrl_read(chan
, XILINX_DMA_REG_CURDESC
),
1497 dma_ctrl_read(chan
, XILINX_DMA_REG_TAILDESC
));
1502 if (status
& XILINX_DMA_DMASR_DLY_CNT_IRQ
) {
1504 * Device takes too long to do the transfer when user requires
1507 dev_dbg(chan
->dev
, "Inter-packet latency too long\n");
1510 if (status
& XILINX_DMA_DMASR_FRM_CNT_IRQ
) {
1511 spin_lock(&chan
->lock
);
1512 xilinx_dma_complete_descriptor(chan
);
1514 chan
->start_transfer(chan
);
1515 spin_unlock(&chan
->lock
);
1518 tasklet_schedule(&chan
->tasklet
);
1523 * append_desc_queue - Queuing descriptor
1524 * @chan: Driver specific dma channel
1525 * @desc: dma transaction descriptor
1527 static void append_desc_queue(struct xilinx_dma_chan
*chan
,
1528 struct xilinx_dma_tx_descriptor
*desc
)
1530 struct xilinx_vdma_tx_segment
*tail_segment
;
1531 struct xilinx_dma_tx_descriptor
*tail_desc
;
1532 struct xilinx_axidma_tx_segment
*axidma_tail_segment
;
1533 struct xilinx_cdma_tx_segment
*cdma_tail_segment
;
1535 if (list_empty(&chan
->pending_list
))
1539 * Add the hardware descriptor to the chain of hardware descriptors
1540 * that already exists in memory.
1542 tail_desc
= list_last_entry(&chan
->pending_list
,
1543 struct xilinx_dma_tx_descriptor
, node
);
1544 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
) {
1545 tail_segment
= list_last_entry(&tail_desc
->segments
,
1546 struct xilinx_vdma_tx_segment
,
1548 tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1549 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
1550 cdma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1551 struct xilinx_cdma_tx_segment
,
1553 cdma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1555 axidma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1556 struct xilinx_axidma_tx_segment
,
1558 axidma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1562 * Add the software descriptor and all children to the list
1563 * of pending transactions
1566 list_add_tail(&desc
->node
, &chan
->pending_list
);
1567 chan
->desc_pendingcount
++;
1569 if (chan
->has_sg
&& (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
)
1570 && unlikely(chan
->desc_pendingcount
> chan
->num_frms
)) {
1571 dev_dbg(chan
->dev
, "desc pendingcount is too high\n");
1572 chan
->desc_pendingcount
= chan
->num_frms
;
1577 * xilinx_dma_tx_submit - Submit DMA transaction
1578 * @tx: Async transaction descriptor
1580 * Return: cookie value on success and failure value on error
1582 static dma_cookie_t
xilinx_dma_tx_submit(struct dma_async_tx_descriptor
*tx
)
1584 struct xilinx_dma_tx_descriptor
*desc
= to_dma_tx_descriptor(tx
);
1585 struct xilinx_dma_chan
*chan
= to_xilinx_chan(tx
->chan
);
1586 dma_cookie_t cookie
;
1587 unsigned long flags
;
1591 xilinx_dma_free_tx_descriptor(chan
, desc
);
1597 * If reset fails, need to hard reset the system.
1598 * Channel is no longer functional
1600 err
= xilinx_dma_chan_reset(chan
);
1605 spin_lock_irqsave(&chan
->lock
, flags
);
1607 cookie
= dma_cookie_assign(tx
);
1609 /* Put this transaction onto the tail of the pending queue */
1610 append_desc_queue(chan
, desc
);
1613 chan
->cyclic
= true;
1615 spin_unlock_irqrestore(&chan
->lock
, flags
);
1621 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1622 * DMA_SLAVE transaction
1623 * @dchan: DMA channel
1624 * @xt: Interleaved template pointer
1625 * @flags: transfer ack flags
1627 * Return: Async transaction descriptor on success and NULL on failure
1629 static struct dma_async_tx_descriptor
*
1630 xilinx_vdma_dma_prep_interleaved(struct dma_chan
*dchan
,
1631 struct dma_interleaved_template
*xt
,
1632 unsigned long flags
)
1634 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1635 struct xilinx_dma_tx_descriptor
*desc
;
1636 struct xilinx_vdma_tx_segment
*segment
;
1637 struct xilinx_vdma_desc_hw
*hw
;
1639 if (!is_slave_direction(xt
->dir
))
1642 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1645 if (xt
->frame_size
!= 1)
1648 /* Allocate a transaction descriptor. */
1649 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1653 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1654 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1655 async_tx_ack(&desc
->async_tx
);
1657 /* Allocate the link descriptor from DMA pool */
1658 segment
= xilinx_vdma_alloc_tx_segment(chan
);
1662 /* Fill in the hardware descriptor */
1664 hw
->vsize
= xt
->numf
;
1665 hw
->hsize
= xt
->sgl
[0].size
;
1666 hw
->stride
= (xt
->sgl
[0].icg
+ xt
->sgl
[0].size
) <<
1667 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT
;
1668 hw
->stride
|= chan
->config
.frm_dly
<<
1669 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT
;
1671 if (xt
->dir
!= DMA_MEM_TO_DEV
) {
1672 if (chan
->ext_addr
) {
1673 hw
->buf_addr
= lower_32_bits(xt
->dst_start
);
1674 hw
->buf_addr_msb
= upper_32_bits(xt
->dst_start
);
1676 hw
->buf_addr
= xt
->dst_start
;
1679 if (chan
->ext_addr
) {
1680 hw
->buf_addr
= lower_32_bits(xt
->src_start
);
1681 hw
->buf_addr_msb
= upper_32_bits(xt
->src_start
);
1683 hw
->buf_addr
= xt
->src_start
;
1687 /* Insert the segment into the descriptor segments list. */
1688 list_add_tail(&segment
->node
, &desc
->segments
);
1690 /* Link the last hardware descriptor with the first. */
1691 segment
= list_first_entry(&desc
->segments
,
1692 struct xilinx_vdma_tx_segment
, node
);
1693 desc
->async_tx
.phys
= segment
->phys
;
1695 return &desc
->async_tx
;
1698 xilinx_dma_free_tx_descriptor(chan
, desc
);
1703 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1704 * @dchan: DMA channel
1705 * @dma_dst: destination address
1706 * @dma_src: source address
1707 * @len: transfer length
1708 * @flags: transfer ack flags
1710 * Return: Async transaction descriptor on success and NULL on failure
1712 static struct dma_async_tx_descriptor
*
1713 xilinx_cdma_prep_memcpy(struct dma_chan
*dchan
, dma_addr_t dma_dst
,
1714 dma_addr_t dma_src
, size_t len
, unsigned long flags
)
1716 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1717 struct xilinx_dma_tx_descriptor
*desc
;
1718 struct xilinx_cdma_tx_segment
*segment
;
1719 struct xilinx_cdma_desc_hw
*hw
;
1721 if (!len
|| len
> XILINX_DMA_MAX_TRANS_LEN
)
1724 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1728 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1729 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1731 /* Allocate the link descriptor from DMA pool */
1732 segment
= xilinx_cdma_alloc_tx_segment(chan
);
1738 hw
->src_addr
= dma_src
;
1739 hw
->dest_addr
= dma_dst
;
1740 if (chan
->ext_addr
) {
1741 hw
->src_addr_msb
= upper_32_bits(dma_src
);
1742 hw
->dest_addr_msb
= upper_32_bits(dma_dst
);
1745 /* Insert the segment into the descriptor segments list. */
1746 list_add_tail(&segment
->node
, &desc
->segments
);
1748 desc
->async_tx
.phys
= segment
->phys
;
1749 hw
->next_desc
= segment
->phys
;
1751 return &desc
->async_tx
;
1754 xilinx_dma_free_tx_descriptor(chan
, desc
);
1759 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1760 * @dchan: DMA channel
1761 * @sgl: scatterlist to transfer to/from
1762 * @sg_len: number of entries in @scatterlist
1763 * @direction: DMA direction
1764 * @flags: transfer ack flags
1765 * @context: APP words of the descriptor
1767 * Return: Async transaction descriptor on success and NULL on failure
1769 static struct dma_async_tx_descriptor
*xilinx_dma_prep_slave_sg(
1770 struct dma_chan
*dchan
, struct scatterlist
*sgl
, unsigned int sg_len
,
1771 enum dma_transfer_direction direction
, unsigned long flags
,
1774 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1775 struct xilinx_dma_tx_descriptor
*desc
;
1776 struct xilinx_axidma_tx_segment
*segment
= NULL
;
1777 u32
*app_w
= (u32
*)context
;
1778 struct scatterlist
*sg
;
1783 if (!is_slave_direction(direction
))
1786 /* Allocate a transaction descriptor. */
1787 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1791 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1792 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1794 /* Build transactions using information in the scatter gather list */
1795 for_each_sg(sgl
, sg
, sg_len
, i
) {
1798 /* Loop until the entire scatterlist entry is used */
1799 while (sg_used
< sg_dma_len(sg
)) {
1800 struct xilinx_axidma_desc_hw
*hw
;
1802 /* Get a free segment */
1803 segment
= xilinx_axidma_alloc_tx_segment(chan
);
1808 * Calculate the maximum number of bytes to transfer,
1809 * making sure it is less than the hw limit
1811 copy
= min_t(size_t, sg_dma_len(sg
) - sg_used
,
1812 XILINX_DMA_MAX_TRANS_LEN
);
1815 /* Fill in the descriptor */
1816 xilinx_axidma_buf(chan
, hw
, sg_dma_address(sg
),
1821 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1823 memcpy(hw
->app
, app_w
, sizeof(u32
) *
1824 XILINX_DMA_NUM_APP_WORDS
);
1830 * Insert the segment into the descriptor segments
1833 list_add_tail(&segment
->node
, &desc
->segments
);
1837 segment
= list_first_entry(&desc
->segments
,
1838 struct xilinx_axidma_tx_segment
, node
);
1839 desc
->async_tx
.phys
= segment
->phys
;
1841 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1842 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1843 segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
1844 segment
= list_last_entry(&desc
->segments
,
1845 struct xilinx_axidma_tx_segment
,
1847 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
1850 return &desc
->async_tx
;
1853 xilinx_dma_free_tx_descriptor(chan
, desc
);
1858 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
1859 * @dchan: DMA channel
1860 * @buf_addr: Physical address of the buffer
1861 * @buf_len: Total length of the cyclic buffers
1862 * @period_len: length of individual cyclic buffer
1863 * @direction: DMA direction
1864 * @flags: transfer ack flags
1866 * Return: Async transaction descriptor on success and NULL on failure
1868 static struct dma_async_tx_descriptor
*xilinx_dma_prep_dma_cyclic(
1869 struct dma_chan
*dchan
, dma_addr_t buf_addr
, size_t buf_len
,
1870 size_t period_len
, enum dma_transfer_direction direction
,
1871 unsigned long flags
)
1873 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1874 struct xilinx_dma_tx_descriptor
*desc
;
1875 struct xilinx_axidma_tx_segment
*segment
, *head_segment
, *prev
= NULL
;
1876 size_t copy
, sg_used
;
1877 unsigned int num_periods
;
1884 num_periods
= buf_len
/ period_len
;
1889 if (!is_slave_direction(direction
))
1892 /* Allocate a transaction descriptor. */
1893 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1897 chan
->direction
= direction
;
1898 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1899 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1901 for (i
= 0; i
< num_periods
; ++i
) {
1904 while (sg_used
< period_len
) {
1905 struct xilinx_axidma_desc_hw
*hw
;
1907 /* Get a free segment */
1908 segment
= xilinx_axidma_alloc_tx_segment(chan
);
1913 * Calculate the maximum number of bytes to transfer,
1914 * making sure it is less than the hw limit
1916 copy
= min_t(size_t, period_len
- sg_used
,
1917 XILINX_DMA_MAX_TRANS_LEN
);
1919 xilinx_axidma_buf(chan
, hw
, buf_addr
, sg_used
,
1924 prev
->hw
.next_desc
= segment
->phys
;
1930 * Insert the segment into the descriptor segments
1933 list_add_tail(&segment
->node
, &desc
->segments
);
1937 head_segment
= list_first_entry(&desc
->segments
,
1938 struct xilinx_axidma_tx_segment
, node
);
1939 desc
->async_tx
.phys
= head_segment
->phys
;
1941 desc
->cyclic
= true;
1942 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1943 reg
|= XILINX_DMA_CR_CYCLIC_BD_EN_MASK
;
1944 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1946 segment
= list_last_entry(&desc
->segments
,
1947 struct xilinx_axidma_tx_segment
,
1949 segment
->hw
.next_desc
= (u32
) head_segment
->phys
;
1951 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1952 if (direction
== DMA_MEM_TO_DEV
) {
1953 head_segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
1954 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
1957 return &desc
->async_tx
;
1960 xilinx_dma_free_tx_descriptor(chan
, desc
);
1965 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1966 * DMA_SLAVE transaction
1967 * @dchan: DMA channel
1968 * @xt: Interleaved template pointer
1969 * @flags: transfer ack flags
1971 * Return: Async transaction descriptor on success and NULL on failure
1973 static struct dma_async_tx_descriptor
*
1974 xilinx_dma_prep_interleaved(struct dma_chan
*dchan
,
1975 struct dma_interleaved_template
*xt
,
1976 unsigned long flags
)
1978 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1979 struct xilinx_dma_tx_descriptor
*desc
;
1980 struct xilinx_axidma_tx_segment
*segment
;
1981 struct xilinx_axidma_desc_hw
*hw
;
1983 if (!is_slave_direction(xt
->dir
))
1986 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1989 if (xt
->frame_size
!= 1)
1992 /* Allocate a transaction descriptor. */
1993 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1997 chan
->direction
= xt
->dir
;
1998 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1999 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
2001 /* Get a free segment */
2002 segment
= xilinx_axidma_alloc_tx_segment(chan
);
2008 /* Fill in the descriptor */
2009 if (xt
->dir
!= DMA_MEM_TO_DEV
)
2010 hw
->buf_addr
= xt
->dst_start
;
2012 hw
->buf_addr
= xt
->src_start
;
2014 hw
->mcdma_control
= chan
->tdest
& XILINX_DMA_BD_TDEST_MASK
;
2015 hw
->vsize_stride
= (xt
->numf
<< XILINX_DMA_BD_VSIZE_SHIFT
) &
2016 XILINX_DMA_BD_VSIZE_MASK
;
2017 hw
->vsize_stride
|= (xt
->sgl
[0].icg
+ xt
->sgl
[0].size
) &
2018 XILINX_DMA_BD_STRIDE_MASK
;
2019 hw
->control
= xt
->sgl
[0].size
& XILINX_DMA_BD_HSIZE_MASK
;
2022 * Insert the segment into the descriptor segments
2025 list_add_tail(&segment
->node
, &desc
->segments
);
2028 segment
= list_first_entry(&desc
->segments
,
2029 struct xilinx_axidma_tx_segment
, node
);
2030 desc
->async_tx
.phys
= segment
->phys
;
2032 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2033 if (xt
->dir
== DMA_MEM_TO_DEV
) {
2034 segment
->hw
.control
|= XILINX_DMA_BD_SOP
;
2035 segment
= list_last_entry(&desc
->segments
,
2036 struct xilinx_axidma_tx_segment
,
2038 segment
->hw
.control
|= XILINX_DMA_BD_EOP
;
2041 return &desc
->async_tx
;
2044 xilinx_dma_free_tx_descriptor(chan
, desc
);
2049 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2050 * @dchan: Driver specific DMA Channel pointer
2052 * Return: '0' always.
2054 static int xilinx_dma_terminate_all(struct dma_chan
*dchan
)
2056 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
2061 xilinx_dma_chan_reset(chan
);
2063 err
= chan
->stop_transfer(chan
);
2065 dev_err(chan
->dev
, "Cannot stop channel %p: %x\n",
2066 chan
, dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
2070 /* Remove and free all of the descriptors in the lists */
2071 xilinx_dma_free_descriptors(chan
);
2075 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
2076 reg
&= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK
;
2077 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
2078 chan
->cyclic
= false;
2081 if ((chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) && chan
->has_sg
)
2082 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
,
2083 XILINX_CDMA_CR_SGMODE
);

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
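
/*
 * Illustrative client-side sketch (not part of this driver): a video client
 * holding a VDMA channel "chan" (the handle name and the values below are
 * assumptions for the example) can apply the run-time settings described in
 * the kernel-doc above through this exported helper, e.g. to coalesce
 * frame-completion interrupts over four frames:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 4,
 *		.park = 0,
 *	};
 *	int err;
 *
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (err)
 *		pr_err("VDMA channel config failed: %d\n", err);
 */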

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes the special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted when the DMA engine is in progress. It is
	 * used to avoid polling for a bit in the status register
	 * to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");