// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core instance.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated this means that they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It can
 * either be device to memory or memory to device, but not both. Also since the
 * device side is a dedicated data bus only connected to a single peripheral
 * there is no address that can or needs to be configured for the device side.
 */
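/*
 * Illustrative sketch (not part of this driver): a peripheral driver would
 * normally consume one of these channels through the generic dmaengine
 * client API. For a device-to-memory channel that could look roughly like
 * the following; the channel name "rx" and the my_period_done() callback
 * are made-up placeholders for the example:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_done;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */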
#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC	0x14
#define   AXI_DMAC_DST_COHERENT_MSK	BIT(0)
#define   AXI_DMAC_DST_COHERENT_GET(x)	FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450
#define AXI_DMAC_REG_CURRENT_SG_ID	0x454
#define AXI_DMAC_REG_SG_ADDRESS		0x47c
#define AXI_DMAC_REG_SG_ADDRESS_HIGH	0x4bc

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)
#define AXI_DMAC_CTRL_ENABLE_SG		BIT(2)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

/* Flags for axi_dmac_hw_desc.flags */
#define AXI_DMAC_HW_FLAG_LAST		BIT(0)
#define AXI_DMAC_HW_FLAG_IRQ		BIT(1)
struct axi_dmac_hw_desc {
	u32 flags;
	u32 id;
	u64 dest_addr;
	u64 src_addr;
	u64 next_sg_addr;
	u32 y_len;
	u32 x_len;
	u32 src_stride;
	u32 dst_stride;
	u64 __pad[2];
};

struct axi_dmac_sg {
	unsigned int partial_len;
	bool schedule_when_free;

	struct axi_dmac_hw_desc *hw;
	dma_addr_t hw_phys;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	struct axi_dmac_chan *chan;

	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[] __counted_by(num_sgs);
};
struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
	bool hw_sg;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}
static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}
static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	if (!chan->hw_sg) {
		val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
		if (val) /* Queue is full, wait for the next SOT IRQ */
			return;
	}

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	if (chan->hw_sg) {
		chan->next_desc = NULL;
	} else if (++desc->num_submitted == desc->num_sgs ||
		   desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (!chan->hw_sg) {
		if (axi_dmac_dest_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
		}

		if (axi_dmac_src_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
		}
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
		if (chan->hw_sg)
			desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
		else if (desc->num_sgs == 1)
			flags |= AXI_DMAC_FLAG_CYCLIC;
	}

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	if (chan->hw_sg) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
			       (u64)sg->hw_phys >> 32);
	} else {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
		axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
	}
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
	else
		return (sg->hw->x_len + 1);
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->hw->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->hw->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Not found partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}
static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	if (chan->hw_sg) {
		if (active->cyclic) {
			vchan_cyclic_callback(&active->vdesc);
		} else {
			list_del(&active->vdesc.node);
			vchan_cookie_complete(&active->vdesc);
			active = axi_dmac_active_desc(chan);
			start_next = !!active;
		}
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
				break;
			if (!(BIT(sg->hw->id) & completed_transfers))
				break;
			active->num_completed++;
			sg->hw->id = AXI_DMAC_SG_UNUSED;
			if (sg->schedule_when_free) {
				sg->schedule_when_free = false;
				start_next = true;
			}

			if (sg->partial_len)
				axi_dmac_compute_residue(chan, active);

			if (active->cyclic)
				vchan_cyclic_callback(&active->vdesc);

			if (active->num_completed == active->num_sgs ||
			    sg->partial_len) {
				if (active->cyclic) {
					active->num_completed = 0; /* wrap around */
				} else {
					list_del(&active->vdesc.node);
					vchan_cookie_complete(&active->vdesc);
					active = axi_dmac_active_desc(chan);
				}
			}
		} while (active);
	}

	return start_next;
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}
static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	u32 ctrl = AXI_DMAC_CTRL_ENABLE;

	if (chan->hw_sg)
		ctrl |= AXI_DMAC_CTRL_ENABLE_SG;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static struct axi_dmac_desc *
axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hws;
	struct axi_dmac_desc *desc;
	dma_addr_t hw_phys;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;
	desc->chan = chan;

	hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
				 &hw_phys, GFP_ATOMIC);
	if (!hws) {
		kfree(desc);
		return NULL;
	}

	for (i = 0; i < num_sgs; i++) {
		desc->sg[i].hw = &hws[i];
		desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);

		hws[i].id = AXI_DMAC_SG_UNUSED;
		hws[i].flags = 0;

		/* Link hardware descriptors */
		hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
	}

	/* The last hardware descriptor will trigger an interrupt */
	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;

	return desc;
}
static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
	dma_addr_t hw_phys = desc->sg[0].hw_phys;

	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
			  hw, hw_phys);
	kfree(desc);
}
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		for (len = period_len; len > segment_size; sg++) {
			if (direction == DMA_DEV_TO_MEM)
				sg->hw->dest_addr = addr;
			else
				sg->hw->src_addr = addr;
			sg->hw->x_len = segment_size - 1;
			sg->hw->y_len = 0;

			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->hw->dest_addr = addr;
		else
			sg->hw->src_addr = addr;
		sg->hw->x_len = len - 1;
		sg->hw->y_len = 0;
		sg++;
		addr += len;
	}

	return sg;
}
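/*
 * Worked example for the segment splitting in axi_dmac_fill_linear_sg()
 * above (numbers are illustrative): with period_len = 10000, max_length =
 * 4096 and length_align_mask = 3, num_segments = DIV_ROUND_UP(10000, 4096)
 * = 3 and segment_size = DIV_ROUND_UP(10000, 3) = 3334, which the alignment
 * step rounds up to ((3334 - 1) | 3) + 1 = 3336. Each period is therefore
 * emitted as two 3336 byte segments followed by a final 3328 byte segment.
 */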
static struct dma_async_tx_descriptor *
axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
	size_t nb, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_sgs = 0;
	struct axi_dmac_sg *dsg;
	size_t i;

	if (direction != chan->direction)
		return NULL;

	for (i = 0; i < nb; i++)
		num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length);

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for (i = 0; i < nb; i++) {
		if (!axi_dmac_check_addr(chan, vecs[i].addr) ||
		    !axi_dmac_check_len(chan, vecs[i].len)) {
			axi_dmac_free_desc(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1,
					      vecs[i].len, dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			axi_dmac_free_desc(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments, num_sgs;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	num_sgs = num_periods * num_segments;

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	/* Chain the last descriptor to the first, and remove its "last" flag */
	desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
	desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
				period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(chan, 1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].hw->src_addr = xt->src_start;
		desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].hw->dest_addr = xt->dst_start;
		desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
		desc->sg[0].hw->y_len = xt->numf - 1;
	} else {
		desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
		desc->sg[0].hw->y_len = 0;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
	case AXI_DMAC_REG_CURRENT_SG_ID:
	case AXI_DMAC_REG_SG_ADDRESS:
	case AXI_DMAC_REG_SG_ADDRESS_HIGH:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};
static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}
/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
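/*
 * Illustrative devicetree fragment for the per-channel properties parsed
 * above (property names follow the adi,axi-dmac-1.00.a binding; addresses,
 * interrupt numbers and widths are only an example):
 *
 *	dma-controller@44a30000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x44a30000 0x1000>;
 *		#dma-cells = <1>;
 *		interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clkc 16>;
 *
 *		adi,channels {
 *			#size-cells = <0>;
 *			#address-cells = <1>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			};
 *		};
 *	};
 */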
static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}
static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
		chan->hw_sg = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}
static void axi_dmac_tasklet_kill(void *task)
{
	tasklet_kill(task);
}

static void axi_dmac_free_dma_controller(void *of_node)
{
	of_dma_controller_free(of_node);
}
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct regmap *regmap;
	unsigned int version;
	u32 irq_mask = 0;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		return ret;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	if (dmac->chan.hw_sg)
		irq_mask |= AXI_DMAC_IRQ_SOT;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			return -EINVAL;
		}
	}

	ret = dmaenginem_async_device_register(dma_dev);
	if (ret)
		return ret;

	/*
	 * Put the action in here so it gets done before unregistering the DMA
	 * device.
	 */
	ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
				       &dmac->chan.vchan.task);
	if (ret)
		return ret;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller,
				       pdev->dev.of_node);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
			       IRQF_SHARED, dev_name(&pdev->dev), dmac);
	if (ret)
		return ret;

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
				       &axi_dmac_regmap_config);

	return PTR_ERR_OR_ZERO(regmap);
}
static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");