// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
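/*
 * Illustrative client-side sketch (not part of this driver): a consumer
 * typically obtains one of these uni-directional channels through the generic
 * dmaengine API and only supplies the memory-side address, e.g. for a
 * device-to-memory channel (the channel name "rx" is hypothetical):
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The device-side address is fixed by the FPGA configuration, so no slave
 * address configuration is needed for it.
 */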
#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};
struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}
static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}
static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}
static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}
static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg)
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		else
			dev_warn(dmac->dma_dev.dev,
				 "Not found partial segment id=%u, len=%u\n",
				 id, len);

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}
static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}
static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
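	/*
	 * Illustrative arithmetic: the expression above rounds segment_size up
	 * to the next multiple of the length alignment, given that
	 * length_align_mask has the form 2^n - 1. For example, with
	 * length_align_mask == 0x3 (4-byte alignment) a segment_size of 1001
	 * becomes ((1001 - 1) | 0x3) + 1 = 1003 + 1 = 1004.
	 */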

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
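/*
 * Illustrative client-side sketch (not part of this driver): a 2D
 * memory-to-device transfer of "numf" lines of "size" bytes each, separated
 * by an inter-line gap of "icg" bytes, could be described roughly like this
 * (all variable names are hypothetical; frame_size must be 1):
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_addr;
 *	xt->src_inc = true;
 *	xt->numf = numf;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = size;
 *	xt->sgl[0].icg = icg;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */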
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};
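/*
 * Note: the driver itself accesses registers through axi_dmac_read() and
 * axi_dmac_write() (plain readl()/writel()); the MMIO regmap registered in
 * axi_dmac_probe() is presumably only there to expose the register map for
 * introspection, e.g. via regmap's debugfs support.
 */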
/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}
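/*
 * Illustrative devicetree fragment (non-normative; the address and widths are
 * made up, see the binding document for the full set of properties). It shows
 * the per-channel properties that axi_dmac_parse_chan_dt() reads from the
 * "adi,channels" child node, using the bus-type constants from
 * <dt-bindings/dma/axi-dmac.h>:
 *
 *	dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			};
 *		};
 *	};
 */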
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int version;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	ret = axi_dmac_detect_caps(dmac);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		 &axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}
static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}
static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");