// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two-dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one-dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with multiple channels and independent per-channel
 * configuration support.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)
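/*
 * Illustrative sketch (not part of the driver): packing the DMACR frame
 * count and delay fields with the shift/mask definitions above. The helper
 * name and the plain u8 parameters are made up for the example; it only
 * shows how the mask/shift pairs are meant to be used together.
 */
static inline u32 xilinx_dma_example_pack_dmacr(u32 dmacr, u8 frame_count,
						u8 delay)
{
	/* Clear the old field contents before inserting the new values */
	dmacr &= ~(XILINX_DMA_DMACR_FRAME_COUNT_MASK |
		   XILINX_DMA_DMACR_DELAY_MASK);
	dmacr |= (u32)frame_count << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
	dmacr |= (u32)delay << XILINX_DMA_DMACR_DELAY_SHIFT;

	return dmacr;
}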
#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)
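/*
 * Illustrative sketch (not part of the driver): splitting a DMASR value
 * into fatal vs. potentially recoverable error bits, mirroring the check
 * done in the interrupt handler further down in this file. The helper name
 * and the bool parameter are made up for the example.
 */
static inline bool xilinx_dma_example_err_is_fatal(u32 dmasr,
						   bool flush_on_fsync)
{
	u32 errors = dmasr & XILINX_DMA_DMASR_ALL_ERR_MASK;

	/* Recoverable errors only count as such with flush-on-fsync enabled */
	if (flush_on_fsync && !(errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK))
		return false;

	return errors != 0;
}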
/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr) \
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
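/*
 * Illustrative sketch (not part of the driver): xilinx_prep_dma_addr_t()
 * pastes "_msb" onto the last token of its argument, so a matching
 * <name>/<name>_msb pair of 32-bit words is combined into one dma_addr_t.
 * The local variable names below are made up for the example; the real
 * callers pass descriptor fields such as hw->src_addr / hw->src_addr_msb.
 */
static inline dma_addr_t xilinx_dma_example_join_addr(u32 lsb, u32 msb)
{
	u32 buf_addr = lsb;
	u32 buf_addr_msb = msb;

	/* Expands to ((dma_addr_t)((u64)buf_addr_msb << 32 | (buf_addr))) */
	return xilinx_prep_dma_addr_t(buf_addr);
}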
/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET		0x0500
#define XILINX_MCDMA_CHEN_OFFSET		0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET		0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET		0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET		0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x)		(0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x)		(0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)	(0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)	(0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Shifts */
#define XILINX_MCDMA_COALESCE_SHIFT		16
#define XILINX_MCDMA_COALESCE_MAX		24
#define XILINX_MCDMA_IRQ_ALL_MASK		GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK		GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK		BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK		BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK		BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK		BIT(7)
#define XILINX_MCDMA_BD_EOP			BIT(30)
#define XILINX_MCDMA_BD_SOP			BIT(31)
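/*
 * Illustrative sketch (not part of the driver): the per-channel MCDMA
 * registers live in 0x40-byte strided banks relative to the MM2S or S2MM
 * control offset, e.g. for channel index 2 the control register sits at
 * 0x40 + 2 * 0x40 = 0xc0 within the selected bank. The helper name and
 * the bool parameter are made up for the example.
 */
static inline u32 xilinx_mcdma_example_chan_cr(unsigned int chan_idx,
					       bool is_s2mm)
{
	u32 base = is_s2mm ? XILINX_MCDMA_S2MM_CTRL_OFFSET :
			     XILINX_MCDMA_MM2S_CTRL_OFFSET;

	return base + XILINX_MCDMA_CHAN_CR_OFFSET(chan_idx);
}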
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved field @0x10
 * @control: Control Information field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 * @s2mm_index: S2MM channel index
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
	u32 s2mm_index;
};

#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
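/*
 * Illustrative sketch (not part of the driver): waiting for a channel to
 * report idle with the polling helper above, the same pattern the stop and
 * reset paths below use. The helper name is made up for the example.
 */
static inline int xilinx_dma_example_wait_idle(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}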
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not 64-bit
 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
 * instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}
static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     u32 period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}
/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_aximcdma_tx_segment *
xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_aximcdma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}
/* Zero the descriptor contents but preserve the hardware chain pointers */
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/* Zero the MCDMA descriptor contents but preserve the hardware chain pointers */
static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *
					 segment)
{
	xilinx_mcdma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
	struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	} else {
		list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
					 &desc->segments, node) {
			list_del(&aximcdma_segment->node);
			xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
		}
	}

	kfree(desc);
}
/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
892 * xilinx_dma_free_chan_resources - Free channel resources
893 * @dchan: DMA channel
895 static void xilinx_dma_free_chan_resources(struct dma_chan
*dchan
)
897 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
900 dev_dbg(chan
->dev
, "Free all channel resources.\n");
902 xilinx_dma_free_descriptors(chan
);
904 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
905 spin_lock_irqsave(&chan
->lock
, flags
);
906 INIT_LIST_HEAD(&chan
->free_seg_list
);
907 spin_unlock_irqrestore(&chan
->lock
, flags
);
909 /* Free memory that is allocated for BD */
910 dma_free_coherent(chan
->dev
, sizeof(*chan
->seg_v
) *
911 XILINX_DMA_NUM_DESCS
, chan
->seg_v
,
/* Free memory that is allocated for cyclic DMA mode */
915 dma_free_coherent(chan
->dev
, sizeof(*chan
->cyclic_seg_v
),
916 chan
->cyclic_seg_v
, chan
->cyclic_seg_p
);
919 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIMCDMA
) {
920 spin_lock_irqsave(&chan
->lock
, flags
);
921 INIT_LIST_HEAD(&chan
->free_seg_list
);
922 spin_unlock_irqrestore(&chan
->lock
, flags
);
924 /* Free memory that is allocated for BD */
925 dma_free_coherent(chan
->dev
, sizeof(*chan
->seg_mv
) *
926 XILINX_DMA_NUM_DESCS
, chan
->seg_mv
,
930 if (chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIDMA
&&
931 chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIMCDMA
) {
932 dma_pool_destroy(chan
->desc_pool
);
933 chan
->desc_pool
= NULL
;
939 * xilinx_dma_get_residue - Compute residue for a given descriptor
940 * @chan: Driver specific dma channel
941 * @desc: dma transaction descriptor
943 * Return: The number of residue bytes for the descriptor.
945 static u32
xilinx_dma_get_residue(struct xilinx_dma_chan
*chan
,
946 struct xilinx_dma_tx_descriptor
*desc
)
948 struct xilinx_cdma_tx_segment
*cdma_seg
;
949 struct xilinx_axidma_tx_segment
*axidma_seg
;
950 struct xilinx_cdma_desc_hw
*cdma_hw
;
951 struct xilinx_axidma_desc_hw
*axidma_hw
;
952 struct list_head
*entry
;
955 list_for_each(entry
, &desc
->segments
) {
956 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
957 cdma_seg
= list_entry(entry
,
958 struct xilinx_cdma_tx_segment
,
960 cdma_hw
= &cdma_seg
->hw
;
961 residue
+= (cdma_hw
->control
- cdma_hw
->status
) &
962 chan
->xdev
->max_buffer_len
;
964 axidma_seg
= list_entry(entry
,
965 struct xilinx_axidma_tx_segment
,
967 axidma_hw
= &axidma_seg
->hw
;
968 residue
+= (axidma_hw
->control
- axidma_hw
->status
) &
969 chan
->xdev
->max_buffer_len
;
977 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
978 * @chan: Driver specific dma channel
979 * @desc: dma transaction descriptor
980 * @flags: flags for spin lock
982 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan
*chan
,
983 struct xilinx_dma_tx_descriptor
*desc
,
984 unsigned long *flags
)
986 dma_async_tx_callback callback
;
987 void *callback_param
;
989 callback
= desc
->async_tx
.callback
;
990 callback_param
= desc
->async_tx
.callback_param
;
992 spin_unlock_irqrestore(&chan
->lock
, *flags
);
993 callback(callback_param
);
994 spin_lock_irqsave(&chan
->lock
, *flags
);
999 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1000 * @chan: Driver specific DMA channel
1002 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan
*chan
)
1004 struct xilinx_dma_tx_descriptor
*desc
, *next
;
1005 unsigned long flags
;
1007 spin_lock_irqsave(&chan
->lock
, flags
);
1009 list_for_each_entry_safe(desc
, next
, &chan
->done_list
, node
) {
1010 struct dmaengine_result result
;
1013 xilinx_dma_chan_handle_cyclic(chan
, desc
, &flags
);
1017 /* Remove from the list of running transactions */
1018 list_del(&desc
->node
);
1020 if (unlikely(desc
->err
)) {
1021 if (chan
->direction
== DMA_DEV_TO_MEM
)
1022 result
.result
= DMA_TRANS_READ_FAILED
;
1024 result
.result
= DMA_TRANS_WRITE_FAILED
;
1026 result
.result
= DMA_TRANS_NOERROR
;
1029 result
.residue
= desc
->residue
;
1031 /* Run the link descriptor callback function */
1032 spin_unlock_irqrestore(&chan
->lock
, flags
);
1033 dmaengine_desc_get_callback_invoke(&desc
->async_tx
, &result
);
1034 spin_lock_irqsave(&chan
->lock
, flags
);
1036 /* Run any dependencies, then free the descriptor */
1037 dma_run_dependencies(&desc
->async_tx
);
1038 xilinx_dma_free_tx_descriptor(chan
, desc
);
1041 spin_unlock_irqrestore(&chan
->lock
, flags
);
1045 * xilinx_dma_do_tasklet - Schedule completion tasklet
1046 * @data: Pointer to the Xilinx DMA channel structure
1048 static void xilinx_dma_do_tasklet(unsigned long data
)
1050 struct xilinx_dma_chan
*chan
= (struct xilinx_dma_chan
*)data
;
1052 xilinx_dma_chan_desc_cleanup(chan
);
1056 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1057 * @dchan: DMA channel
1059 * Return: '0' on success and failure value on error
1061 static int xilinx_dma_alloc_chan_resources(struct dma_chan
*dchan
)
1063 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1066 /* Has this channel already been allocated? */
1067 if (chan
->desc_pool
)
 * We need the descriptor to be aligned to 64 bytes
 * to meet the Xilinx VDMA specification requirement.
1074 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
1075 /* Allocate the buffer descriptors. */
1076 chan
->seg_v
= dma_alloc_coherent(chan
->dev
,
1077 sizeof(*chan
->seg_v
) * XILINX_DMA_NUM_DESCS
,
1078 &chan
->seg_p
, GFP_KERNEL
);
1081 "unable to allocate channel %d descriptors\n",
 * For cyclic DMA mode we need to program the tail descriptor
 * register with a value that is not part of the BD chain,
 * so allocate a descriptor segment at channel allocation time
 * that is used only for programming the tail descriptor.
 */
1091 chan
->cyclic_seg_v
= dma_alloc_coherent(chan
->dev
,
1092 sizeof(*chan
->cyclic_seg_v
),
1093 &chan
->cyclic_seg_p
,
1095 if (!chan
->cyclic_seg_v
) {
1097 "unable to allocate desc segment for cyclic DMA\n");
1098 dma_free_coherent(chan
->dev
, sizeof(*chan
->seg_v
) *
1099 XILINX_DMA_NUM_DESCS
, chan
->seg_v
,
1103 chan
->cyclic_seg_v
->phys
= chan
->cyclic_seg_p
;
1105 for (i
= 0; i
< XILINX_DMA_NUM_DESCS
; i
++) {
1106 chan
->seg_v
[i
].hw
.next_desc
=
1107 lower_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
1108 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
1109 chan
->seg_v
[i
].hw
.next_desc_msb
=
1110 upper_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_v
) *
1111 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
1112 chan
->seg_v
[i
].phys
= chan
->seg_p
+
1113 sizeof(*chan
->seg_v
) * i
;
1114 list_add_tail(&chan
->seg_v
[i
].node
,
1115 &chan
->free_seg_list
);
1117 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIMCDMA
) {
1118 /* Allocate the buffer descriptors. */
1119 chan
->seg_mv
= dma_alloc_coherent(chan
->dev
,
1120 sizeof(*chan
->seg_mv
) *
1121 XILINX_DMA_NUM_DESCS
,
1122 &chan
->seg_p
, GFP_KERNEL
);
1123 if (!chan
->seg_mv
) {
1125 "unable to allocate channel %d descriptors\n",
1129 for (i
= 0; i
< XILINX_DMA_NUM_DESCS
; i
++) {
1130 chan
->seg_mv
[i
].hw
.next_desc
=
1131 lower_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_mv
) *
1132 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
1133 chan
->seg_mv
[i
].hw
.next_desc_msb
=
1134 upper_32_bits(chan
->seg_p
+ sizeof(*chan
->seg_mv
) *
1135 ((i
+ 1) % XILINX_DMA_NUM_DESCS
));
1136 chan
->seg_mv
[i
].phys
= chan
->seg_p
+
1137 sizeof(*chan
->seg_v
) * i
;
1138 list_add_tail(&chan
->seg_mv
[i
].node
,
1139 &chan
->free_seg_list
);
1141 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
1142 chan
->desc_pool
= dma_pool_create("xilinx_cdma_desc_pool",
1144 sizeof(struct xilinx_cdma_tx_segment
),
1145 __alignof__(struct xilinx_cdma_tx_segment
),
1148 chan
->desc_pool
= dma_pool_create("xilinx_vdma_desc_pool",
1150 sizeof(struct xilinx_vdma_tx_segment
),
1151 __alignof__(struct xilinx_vdma_tx_segment
),
1155 if (!chan
->desc_pool
&&
1156 ((chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIDMA
) &&
1157 chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_AXIMCDMA
)) {
1159 "unable to allocate channel %d descriptor pool\n",
1164 dma_cookie_init(dchan
);
1166 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_AXIDMA
) {
/* For AXI DMA, resetting one channel will reset the other
 * channel as well, so enable the interrupts here.
 */
1170 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1171 XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1174 if ((chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) && chan
->has_sg
)
1175 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1176 XILINX_CDMA_CR_SGMODE
);
1182 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1183 * @chan: Driver specific DMA channel
1184 * @size: Total data that needs to be copied
1185 * @done: Amount of data that has been already copied
1187 * Return: Amount of data that has to be copied
1189 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan
*chan
,
1194 copy
= min_t(size_t, size
- done
,
1195 chan
->xdev
->max_buffer_len
);
1197 if ((copy
+ done
< size
) &&
1198 chan
->xdev
->common
.copy_align
) {
1200 * If this is not the last descriptor, make sure
1201 * the next one will be properly aligned
1203 copy
= rounddown(copy
,
1204 (1 << chan
->xdev
->common
.copy_align
));
1210 * xilinx_dma_tx_status - Get DMA transaction status
1211 * @dchan: DMA channel
1212 * @cookie: Transaction identifier
1213 * @txstate: Transaction state
1215 * Return: DMA transaction status
1217 static enum dma_status
xilinx_dma_tx_status(struct dma_chan
*dchan
,
1218 dma_cookie_t cookie
,
1219 struct dma_tx_state
*txstate
)
1221 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1222 struct xilinx_dma_tx_descriptor
*desc
;
1223 enum dma_status ret
;
1224 unsigned long flags
;
1227 ret
= dma_cookie_status(dchan
, cookie
, txstate
);
1228 if (ret
== DMA_COMPLETE
|| !txstate
)
1231 spin_lock_irqsave(&chan
->lock
, flags
);
1233 desc
= list_last_entry(&chan
->active_list
,
1234 struct xilinx_dma_tx_descriptor
, node
);
1236 * VDMA and simple mode do not support residue reporting, so the
1237 * residue field will always be 0.
1239 if (chan
->has_sg
&& chan
->xdev
->dma_config
->dmatype
!= XDMA_TYPE_VDMA
)
1240 residue
= xilinx_dma_get_residue(chan
, desc
);
1242 spin_unlock_irqrestore(&chan
->lock
, flags
);
1244 dma_set_residue(txstate
, residue
);
1250 * xilinx_dma_stop_transfer - Halt DMA channel
1251 * @chan: Driver specific DMA channel
1253 * Return: '0' on success and failure value on error
1255 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan
*chan
)
1259 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RUNSTOP
);
1261 /* Wait for the hardware to halt */
1262 return xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1263 val
& XILINX_DMA_DMASR_HALTED
, 0,
1264 XILINX_DMA_LOOP_COUNT
);
1268 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1269 * @chan: Driver specific DMA channel
1271 * Return: '0' on success and failure value on error
1273 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan
*chan
)
1277 return xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1278 val
& XILINX_DMA_DMASR_IDLE
, 0,
1279 XILINX_DMA_LOOP_COUNT
);
1283 * xilinx_dma_start - Start DMA channel
1284 * @chan: Driver specific DMA channel
1286 static void xilinx_dma_start(struct xilinx_dma_chan
*chan
)
1291 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RUNSTOP
);
1293 /* Wait for the hardware to start */
1294 err
= xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMASR
, val
,
1295 !(val
& XILINX_DMA_DMASR_HALTED
), 0,
1296 XILINX_DMA_LOOP_COUNT
);
1299 dev_err(chan
->dev
, "Cannot start channel %p: %x\n",
1300 chan
, dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
1307 * xilinx_vdma_start_transfer - Starts VDMA transfer
1308 * @chan: Driver specific channel struct pointer
1310 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan
*chan
)
1312 struct xilinx_vdma_config
*config
= &chan
->config
;
1313 struct xilinx_dma_tx_descriptor
*desc
;
1315 struct xilinx_vdma_tx_segment
*segment
, *last
= NULL
;
1318 /* This function was invoked with lock held */
1325 if (list_empty(&chan
->pending_list
))
1328 desc
= list_first_entry(&chan
->pending_list
,
1329 struct xilinx_dma_tx_descriptor
, node
);
1331 /* Configure the hardware using info in the config structure */
1332 if (chan
->has_vflip
) {
1333 reg
= dma_read(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
);
1334 reg
&= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP
;
1335 reg
|= config
->vflip_en
;
1336 dma_write(chan
, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP
,
1340 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1342 if (config
->frm_cnt_en
)
1343 reg
|= XILINX_DMA_DMACR_FRAMECNT_EN
;
1345 reg
&= ~XILINX_DMA_DMACR_FRAMECNT_EN
;
1347 /* If not parking, enable circular mode */
1349 reg
&= ~XILINX_DMA_DMACR_CIRC_EN
;
1351 reg
|= XILINX_DMA_DMACR_CIRC_EN
;
1353 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1355 j
= chan
->desc_submitcount
;
1356 reg
= dma_read(chan
, XILINX_DMA_REG_PARK_PTR
);
1357 if (chan
->direction
== DMA_MEM_TO_DEV
) {
1358 reg
&= ~XILINX_DMA_PARK_PTR_RD_REF_MASK
;
1359 reg
|= j
<< XILINX_DMA_PARK_PTR_RD_REF_SHIFT
;
1361 reg
&= ~XILINX_DMA_PARK_PTR_WR_REF_MASK
;
1362 reg
|= j
<< XILINX_DMA_PARK_PTR_WR_REF_SHIFT
;
1364 dma_write(chan
, XILINX_DMA_REG_PARK_PTR
, reg
);
1366 /* Start the hardware */
1367 xilinx_dma_start(chan
);
1372 /* Start the transfer */
1373 if (chan
->desc_submitcount
< chan
->num_frms
)
1374 i
= chan
->desc_submitcount
;
1376 list_for_each_entry(segment
, &desc
->segments
, node
) {
1378 vdma_desc_write_64(chan
,
1379 XILINX_VDMA_REG_START_ADDRESS_64(i
++),
1380 segment
->hw
.buf_addr
,
1381 segment
->hw
.buf_addr_msb
);
1383 vdma_desc_write(chan
,
1384 XILINX_VDMA_REG_START_ADDRESS(i
++),
1385 segment
->hw
.buf_addr
);
/* HW expects these parameters to be the same for one transaction */
1394 vdma_desc_write(chan
, XILINX_DMA_REG_HSIZE
, last
->hw
.hsize
);
1395 vdma_desc_write(chan
, XILINX_DMA_REG_FRMDLY_STRIDE
,
1397 vdma_desc_write(chan
, XILINX_DMA_REG_VSIZE
, last
->hw
.vsize
);
1399 chan
->desc_submitcount
++;
1400 chan
->desc_pendingcount
--;
1401 list_del(&desc
->node
);
1402 list_add_tail(&desc
->node
, &chan
->active_list
);
1403 if (chan
->desc_submitcount
== chan
->num_frms
)
1404 chan
->desc_submitcount
= 0;
1410 * xilinx_cdma_start_transfer - Starts cdma transfer
1411 * @chan: Driver specific channel struct pointer
1413 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan
*chan
)
1415 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1416 struct xilinx_cdma_tx_segment
*tail_segment
;
1417 u32 ctrl_reg
= dma_read(chan
, XILINX_DMA_REG_DMACR
);
1425 if (list_empty(&chan
->pending_list
))
1428 head_desc
= list_first_entry(&chan
->pending_list
,
1429 struct xilinx_dma_tx_descriptor
, node
);
1430 tail_desc
= list_last_entry(&chan
->pending_list
,
1431 struct xilinx_dma_tx_descriptor
, node
);
1432 tail_segment
= list_last_entry(&tail_desc
->segments
,
1433 struct xilinx_cdma_tx_segment
, node
);
1435 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1436 ctrl_reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1437 ctrl_reg
|= chan
->desc_pendingcount
<<
1438 XILINX_DMA_CR_COALESCE_SHIFT
;
1439 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, ctrl_reg
);
1443 dma_ctrl_clr(chan
, XILINX_DMA_REG_DMACR
,
1444 XILINX_CDMA_CR_SGMODE
);
1446 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1447 XILINX_CDMA_CR_SGMODE
);
1449 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1450 head_desc
->async_tx
.phys
);
1452 /* Update tail ptr register which will start the transfer */
1453 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1454 tail_segment
->phys
);
1456 /* In simple mode */
1457 struct xilinx_cdma_tx_segment
*segment
;
1458 struct xilinx_cdma_desc_hw
*hw
;
1460 segment
= list_first_entry(&head_desc
->segments
,
1461 struct xilinx_cdma_tx_segment
,
1466 xilinx_write(chan
, XILINX_CDMA_REG_SRCADDR
,
1467 xilinx_prep_dma_addr_t(hw
->src_addr
));
1468 xilinx_write(chan
, XILINX_CDMA_REG_DSTADDR
,
1469 xilinx_prep_dma_addr_t(hw
->dest_addr
));
1471 /* Start the transfer */
1472 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1473 hw
->control
& chan
->xdev
->max_buffer_len
);
1476 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1477 chan
->desc_pendingcount
= 0;
1482 * xilinx_dma_start_transfer - Starts DMA transfer
1483 * @chan: Driver specific channel struct pointer
1485 static void xilinx_dma_start_transfer(struct xilinx_dma_chan
*chan
)
1487 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1488 struct xilinx_axidma_tx_segment
*tail_segment
;
1494 if (list_empty(&chan
->pending_list
))
1500 head_desc
= list_first_entry(&chan
->pending_list
,
1501 struct xilinx_dma_tx_descriptor
, node
);
1502 tail_desc
= list_last_entry(&chan
->pending_list
,
1503 struct xilinx_dma_tx_descriptor
, node
);
1504 tail_segment
= list_last_entry(&tail_desc
->segments
,
1505 struct xilinx_axidma_tx_segment
, node
);
1507 reg
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
);
1509 if (chan
->desc_pendingcount
<= XILINX_DMA_COALESCE_MAX
) {
1510 reg
&= ~XILINX_DMA_CR_COALESCE_MAX
;
1511 reg
|= chan
->desc_pendingcount
<<
1512 XILINX_DMA_CR_COALESCE_SHIFT
;
1513 dma_ctrl_write(chan
, XILINX_DMA_REG_DMACR
, reg
);
1517 xilinx_write(chan
, XILINX_DMA_REG_CURDESC
,
1518 head_desc
->async_tx
.phys
);
1520 xilinx_dma_start(chan
);
1525 /* Start the transfer */
1528 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1529 chan
->cyclic_seg_v
->phys
);
1531 xilinx_write(chan
, XILINX_DMA_REG_TAILDESC
,
1532 tail_segment
->phys
);
1534 struct xilinx_axidma_tx_segment
*segment
;
1535 struct xilinx_axidma_desc_hw
*hw
;
1537 segment
= list_first_entry(&head_desc
->segments
,
1538 struct xilinx_axidma_tx_segment
,
1542 xilinx_write(chan
, XILINX_DMA_REG_SRCDSTADDR
,
1543 xilinx_prep_dma_addr_t(hw
->buf_addr
));
1545 /* Start the transfer */
1546 dma_ctrl_write(chan
, XILINX_DMA_REG_BTT
,
1547 hw
->control
& chan
->xdev
->max_buffer_len
);
1550 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1551 chan
->desc_pendingcount
= 0;
1556 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1557 * @chan: Driver specific channel struct pointer
1559 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan
*chan
)
1561 struct xilinx_dma_tx_descriptor
*head_desc
, *tail_desc
;
1562 struct xilinx_axidma_tx_segment
*tail_segment
;
 * The lock is already held by the calling function, so we don't
 * need to take it again here.
1576 if (list_empty(&chan
->pending_list
))
1579 head_desc
= list_first_entry(&chan
->pending_list
,
1580 struct xilinx_dma_tx_descriptor
, node
);
1581 tail_desc
= list_last_entry(&chan
->pending_list
,
1582 struct xilinx_dma_tx_descriptor
, node
);
1583 tail_segment
= list_last_entry(&tail_desc
->segments
,
1584 struct xilinx_axidma_tx_segment
, node
);
1586 reg
= dma_ctrl_read(chan
, XILINX_MCDMA_CHAN_CR_OFFSET(chan
->tdest
));
1588 if (chan
->desc_pendingcount
<= XILINX_MCDMA_COALESCE_MAX
) {
1589 reg
&= ~XILINX_MCDMA_COALESCE_MASK
;
1590 reg
|= chan
->desc_pendingcount
<<
1591 XILINX_MCDMA_COALESCE_SHIFT
;
1594 reg
|= XILINX_MCDMA_IRQ_ALL_MASK
;
1595 dma_ctrl_write(chan
, XILINX_MCDMA_CHAN_CR_OFFSET(chan
->tdest
), reg
);
1597 /* Program current descriptor */
1598 xilinx_write(chan
, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan
->tdest
),
1599 head_desc
->async_tx
.phys
);
1601 /* Program channel enable register */
1602 reg
= dma_ctrl_read(chan
, XILINX_MCDMA_CHEN_OFFSET
);
1603 reg
|= BIT(chan
->tdest
);
1604 dma_ctrl_write(chan
, XILINX_MCDMA_CHEN_OFFSET
, reg
);
1606 /* Start the fetch of BDs for the channel */
1607 reg
= dma_ctrl_read(chan
, XILINX_MCDMA_CHAN_CR_OFFSET(chan
->tdest
));
1608 reg
|= XILINX_MCDMA_CR_RUNSTOP_MASK
;
1609 dma_ctrl_write(chan
, XILINX_MCDMA_CHAN_CR_OFFSET(chan
->tdest
), reg
);
1611 xilinx_dma_start(chan
);
1616 /* Start the transfer */
1617 xilinx_write(chan
, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan
->tdest
),
1618 tail_segment
->phys
);
1620 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
1621 chan
->desc_pendingcount
= 0;
1626 * xilinx_dma_issue_pending - Issue pending transactions
1627 * @dchan: DMA channel
1629 static void xilinx_dma_issue_pending(struct dma_chan
*dchan
)
1631 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1632 unsigned long flags
;
1634 spin_lock_irqsave(&chan
->lock
, flags
);
1635 chan
->start_transfer(chan
);
1636 spin_unlock_irqrestore(&chan
->lock
, flags
);
1640 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1641 * @chan : xilinx DMA channel
1645 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan
*chan
)
1647 struct xilinx_dma_tx_descriptor
*desc
, *next
;
1649 /* This function was invoked with lock held */
1650 if (list_empty(&chan
->active_list
))
1653 list_for_each_entry_safe(desc
, next
, &chan
->active_list
, node
) {
1654 if (chan
->has_sg
&& chan
->xdev
->dma_config
->dmatype
!=
1656 desc
->residue
= xilinx_dma_get_residue(chan
, desc
);
1659 desc
->err
= chan
->err
;
1661 list_del(&desc
->node
);
1663 dma_cookie_complete(&desc
->async_tx
);
1664 list_add_tail(&desc
->node
, &chan
->done_list
);
1669 * xilinx_dma_reset - Reset DMA channel
1670 * @chan: Driver specific DMA channel
1672 * Return: '0' on success and failure value on error
1674 static int xilinx_dma_reset(struct xilinx_dma_chan
*chan
)
1679 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
, XILINX_DMA_DMACR_RESET
);
1681 /* Wait for the hardware to finish reset */
1682 err
= xilinx_dma_poll_timeout(chan
, XILINX_DMA_REG_DMACR
, tmp
,
1683 !(tmp
& XILINX_DMA_DMACR_RESET
), 0,
1684 XILINX_DMA_LOOP_COUNT
);
1687 dev_err(chan
->dev
, "reset timeout, cr %x, sr %x\n",
1688 dma_ctrl_read(chan
, XILINX_DMA_REG_DMACR
),
1689 dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
));
1695 chan
->desc_pendingcount
= 0;
1696 chan
->desc_submitcount
= 0;
1702 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1703 * @chan: Driver specific DMA channel
1705 * Return: '0' on success and failure value on error
1707 static int xilinx_dma_chan_reset(struct xilinx_dma_chan
*chan
)
1712 err
= xilinx_dma_reset(chan
);
1716 /* Enable interrupts */
1717 dma_ctrl_set(chan
, XILINX_DMA_REG_DMACR
,
1718 XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1724 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1726 * @data: Pointer to the Xilinx MCDMA channel structure
1728 * Return: IRQ_HANDLED/IRQ_NONE
1730 static irqreturn_t
xilinx_mcdma_irq_handler(int irq
, void *data
)
1732 struct xilinx_dma_chan
*chan
= data
;
1733 u32 status
, ser_offset
, chan_sermask
, chan_offset
= 0, chan_id
;
1735 if (chan
->direction
== DMA_DEV_TO_MEM
)
1736 ser_offset
= XILINX_MCDMA_RXINT_SER_OFFSET
;
1738 ser_offset
= XILINX_MCDMA_TXINT_SER_OFFSET
;
1740 /* Read the channel id raising the interrupt*/
1741 chan_sermask
= dma_ctrl_read(chan
, ser_offset
);
1742 chan_id
= ffs(chan_sermask
);
1747 if (chan
->direction
== DMA_DEV_TO_MEM
)
1748 chan_offset
= chan
->xdev
->s2mm_index
;
1750 chan_offset
= chan_offset
+ (chan_id
- 1);
1751 chan
= chan
->xdev
->chan
[chan_offset
];
1752 /* Read the status and ack the interrupts. */
1753 status
= dma_ctrl_read(chan
, XILINX_MCDMA_CHAN_SR_OFFSET(chan
->tdest
));
1754 if (!(status
& XILINX_MCDMA_IRQ_ALL_MASK
))
1757 dma_ctrl_write(chan
, XILINX_MCDMA_CHAN_SR_OFFSET(chan
->tdest
),
1758 status
& XILINX_MCDMA_IRQ_ALL_MASK
);
1760 if (status
& XILINX_MCDMA_IRQ_ERR_MASK
) {
1761 dev_err(chan
->dev
, "Channel %p has errors %x cdr %x tdr %x\n",
1763 dma_ctrl_read(chan
, XILINX_MCDMA_CH_ERR_OFFSET
),
1764 dma_ctrl_read(chan
, XILINX_MCDMA_CHAN_CDESC_OFFSET
1766 dma_ctrl_read(chan
, XILINX_MCDMA_CHAN_TDESC_OFFSET
1771 if (status
& XILINX_MCDMA_IRQ_DELAY_MASK
) {
 * Device takes too long to do the transfer when the user
 * requires responsiveness.
1776 dev_dbg(chan
->dev
, "Inter-packet latency too long\n");
1779 if (status
& XILINX_MCDMA_IRQ_IOC_MASK
) {
1780 spin_lock(&chan
->lock
);
1781 xilinx_dma_complete_descriptor(chan
);
1783 chan
->start_transfer(chan
);
1784 spin_unlock(&chan
->lock
);
1787 tasklet_schedule(&chan
->tasklet
);
1792 * xilinx_dma_irq_handler - DMA Interrupt handler
1794 * @data: Pointer to the Xilinx DMA channel structure
1796 * Return: IRQ_HANDLED/IRQ_NONE
1798 static irqreturn_t
xilinx_dma_irq_handler(int irq
, void *data
)
1800 struct xilinx_dma_chan
*chan
= data
;
1803 /* Read the status and ack the interrupts. */
1804 status
= dma_ctrl_read(chan
, XILINX_DMA_REG_DMASR
);
1805 if (!(status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
))
1808 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1809 status
& XILINX_DMA_DMAXR_ALL_IRQ_MASK
);
1811 if (status
& XILINX_DMA_DMASR_ERR_IRQ
) {
 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
 * error is recoverable, ignore it. Otherwise flag the error.
 *
 * Only recoverable errors can be cleared in the DMASR register,
 * so make sure not to write 1 to any of the other error bits.
1819 u32 errors
= status
& XILINX_DMA_DMASR_ALL_ERR_MASK
;
1821 dma_ctrl_write(chan
, XILINX_DMA_REG_DMASR
,
1822 errors
& XILINX_DMA_DMASR_ERR_RECOVER_MASK
);
1824 if (!chan
->flush_on_fsync
||
1825 (errors
& ~XILINX_DMA_DMASR_ERR_RECOVER_MASK
)) {
1827 "Channel %p has errors %x, cdr %x tdr %x\n",
1829 dma_ctrl_read(chan
, XILINX_DMA_REG_CURDESC
),
1830 dma_ctrl_read(chan
, XILINX_DMA_REG_TAILDESC
));
1835 if (status
& XILINX_DMA_DMASR_DLY_CNT_IRQ
) {
 * Device takes too long to do the transfer when the user
 * requires responsiveness.
1840 dev_dbg(chan
->dev
, "Inter-packet latency too long\n");
1843 if (status
& XILINX_DMA_DMASR_FRM_CNT_IRQ
) {
1844 spin_lock(&chan
->lock
);
1845 xilinx_dma_complete_descriptor(chan
);
1847 chan
->start_transfer(chan
);
1848 spin_unlock(&chan
->lock
);
1851 tasklet_schedule(&chan
->tasklet
);
1856 * append_desc_queue - Queuing descriptor
1857 * @chan: Driver specific dma channel
1858 * @desc: dma transaction descriptor
1860 static void append_desc_queue(struct xilinx_dma_chan
*chan
,
1861 struct xilinx_dma_tx_descriptor
*desc
)
1863 struct xilinx_vdma_tx_segment
*tail_segment
;
1864 struct xilinx_dma_tx_descriptor
*tail_desc
;
1865 struct xilinx_axidma_tx_segment
*axidma_tail_segment
;
1866 struct xilinx_cdma_tx_segment
*cdma_tail_segment
;
1868 if (list_empty(&chan
->pending_list
))
1872 * Add the hardware descriptor to the chain of hardware descriptors
1873 * that already exists in memory.
1875 tail_desc
= list_last_entry(&chan
->pending_list
,
1876 struct xilinx_dma_tx_descriptor
, node
);
1877 if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
) {
1878 tail_segment
= list_last_entry(&tail_desc
->segments
,
1879 struct xilinx_vdma_tx_segment
,
1881 tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1882 } else if (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_CDMA
) {
1883 cdma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1884 struct xilinx_cdma_tx_segment
,
1886 cdma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1888 axidma_tail_segment
= list_last_entry(&tail_desc
->segments
,
1889 struct xilinx_axidma_tx_segment
,
1891 axidma_tail_segment
->hw
.next_desc
= (u32
)desc
->async_tx
.phys
;
1895 * Add the software descriptor and all children to the list
1896 * of pending transactions
1899 list_add_tail(&desc
->node
, &chan
->pending_list
);
1900 chan
->desc_pendingcount
++;
1902 if (chan
->has_sg
&& (chan
->xdev
->dma_config
->dmatype
== XDMA_TYPE_VDMA
)
1903 && unlikely(chan
->desc_pendingcount
> chan
->num_frms
)) {
1904 dev_dbg(chan
->dev
, "desc pendingcount is too high\n");
1905 chan
->desc_pendingcount
= chan
->num_frms
;
1910 * xilinx_dma_tx_submit - Submit DMA transaction
1911 * @tx: Async transaction descriptor
1913 * Return: cookie value on success and failure value on error
1915 static dma_cookie_t
xilinx_dma_tx_submit(struct dma_async_tx_descriptor
*tx
)
1917 struct xilinx_dma_tx_descriptor
*desc
= to_dma_tx_descriptor(tx
);
1918 struct xilinx_dma_chan
*chan
= to_xilinx_chan(tx
->chan
);
1919 dma_cookie_t cookie
;
1920 unsigned long flags
;
1924 xilinx_dma_free_tx_descriptor(chan
, desc
);
 * If the reset fails, the channel is no longer functional and the
 * system needs a hard reset.
1933 err
= xilinx_dma_chan_reset(chan
);
1938 spin_lock_irqsave(&chan
->lock
, flags
);
1940 cookie
= dma_cookie_assign(tx
);
1942 /* Put this transaction onto the tail of the pending queue */
1943 append_desc_queue(chan
, desc
);
1946 chan
->cyclic
= true;
1948 spin_unlock_irqrestore(&chan
->lock
, flags
);
1954 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1955 * DMA_SLAVE transaction
1956 * @dchan: DMA channel
1957 * @xt: Interleaved template pointer
1958 * @flags: transfer ack flags
1960 * Return: Async transaction descriptor on success and NULL on failure
1962 static struct dma_async_tx_descriptor
*
1963 xilinx_vdma_dma_prep_interleaved(struct dma_chan
*dchan
,
1964 struct dma_interleaved_template
*xt
,
1965 unsigned long flags
)
1967 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
1968 struct xilinx_dma_tx_descriptor
*desc
;
1969 struct xilinx_vdma_tx_segment
*segment
;
1970 struct xilinx_vdma_desc_hw
*hw
;
1972 if (!is_slave_direction(xt
->dir
))
1975 if (!xt
->numf
|| !xt
->sgl
[0].size
)
1978 if (xt
->frame_size
!= 1)
1981 /* Allocate a transaction descriptor. */
1982 desc
= xilinx_dma_alloc_tx_descriptor(chan
);
1986 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1987 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1988 async_tx_ack(&desc
->async_tx
);
1990 /* Allocate the link descriptor from DMA pool */
1991 segment
= xilinx_vdma_alloc_tx_segment(chan
);
1995 /* Fill in the hardware descriptor */
1997 hw
->vsize
= xt
->numf
;
1998 hw
->hsize
= xt
->sgl
[0].size
;
1999 hw
->stride
= (xt
->sgl
[0].icg
+ xt
->sgl
[0].size
) <<
2000 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT
;
2001 hw
->stride
|= chan
->config
.frm_dly
<<
2002 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT
;
2004 if (xt
->dir
!= DMA_MEM_TO_DEV
) {
2005 if (chan
->ext_addr
) {
2006 hw
->buf_addr
= lower_32_bits(xt
->dst_start
);
2007 hw
->buf_addr_msb
= upper_32_bits(xt
->dst_start
);
2009 hw
->buf_addr
= xt
->dst_start
;
2012 if (chan
->ext_addr
) {
2013 hw
->buf_addr
= lower_32_bits(xt
->src_start
);
2014 hw
->buf_addr_msb
= upper_32_bits(xt
->src_start
);
2016 hw
->buf_addr
= xt
->src_start
;
2020 /* Insert the segment into the descriptor segments list. */
2021 list_add_tail(&segment
->node
, &desc
->segments
);
2023 /* Link the last hardware descriptor with the first. */
2024 segment
= list_first_entry(&desc
->segments
,
2025 struct xilinx_vdma_tx_segment
, node
);
2026 desc
->async_tx
.phys
= segment
->phys
;
2028 return &desc
->async_tx
;
2031 xilinx_dma_free_tx_descriptor(chan
, desc
);
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
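/*
 * Illustrative only: a CDMA client normally reaches this through the
 * generic helper rather than calling it directly (dst, src and len are
 * hypothetical, already DMA-mapped values):
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 */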
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);
			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
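/*
 * Illustrative only: a slave client usually maps a scatterlist and hands it
 * to the generic helper, which lands here (dev, sgt and nents are
 * hypothetical names used only for this sketch):
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */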
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
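/*
 * Illustrative only: audio-style clients usually set up the ring through
 * the generic cyclic helper (buf_phys, buf_len, period_len, period_done_cb
 * and priv are hypothetical names):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = period_done_cb;
 *	txd->callback_param = priv;
 *
 * The callback then runs once per completed period from the channel
 * tasklet, until the transfer is terminated.
 */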
/**
 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			   unsigned int sg_len,
			   enum dma_transfer_direction direction,
			   unsigned long flags, void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_aximcdma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_aximcdma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     chan->xdev->max_buffer_len);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
					    sg_used);
			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV && app_w) {
				memcpy(hw->app, app_w, sizeof(u32) *
				       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;
			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_aximcdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_MCDMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_aximcdma_tx_segment,
					  node);
		segment->hw.control |= XILINX_MCDMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
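/*
 * Illustrative only: a VDMA client that owns a channel might apply a
 * run-time configuration like this (the values shown are arbitrary
 * examples):
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly = 0,
 *		.park = 0,
 *		.gen_lock = 0,
 *		.coalesc = 1,
 *		.delay = 0,
 *	};
 *
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 */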
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
				err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
				err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
				err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
				err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
			err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
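/*
 * Illustrative only: the clock names requested above map to "clock-names"
 * entries in the controller's device tree node, e.g. for a VDMA instance
 * (a sketch with a hypothetical clock controller phandle, not a complete
 * node):
 *
 *	clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *	clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *		      "m_axis_mm2s_aclk", "m_axi_s2mm_aclk",
 *		      "s_axis_s2mm_aclk";
 */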
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted when the DMA engine is in progress. This variable is
	 * added to avoid polling for a bit in the status register to
	 * know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		xdev->s2mm_index = xdev->nr_channels;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
			chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
		else
			chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;

		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, chan->tdest);
	err = request_irq(chan->irq, xdev->dma_config->irq_handler,
			  IRQF_SHARED, "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		chan->start_transfer = xilinx_mcdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* check if SG is enabled (only for AXIDMA and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
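/*
 * Illustrative device tree usage (an example, not taken from a real
 * design): the single dma-cell is the channel index checked above, so a
 * client node might reference two channels of a controller labelled
 * axi_dma_0 as:
 *
 *	dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *	dma-names = "tx", "rx";
 */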
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
};

static const struct xilinx_dma_config aximcdma_config = {
	.dmatype = XDMA_TYPE_AXIMCDMA,
	.clk_init = axidma_clk_init,
	.irq_handler = xilinx_mcdma_irq_handler,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{ .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	xdev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
	    xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported by only AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		/* Residue calculation is supported by only AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
		dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");