// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_cyclic;
	bool hw_2d;
	bool hw_partial_xfer;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
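
/*
 * Illustrative note (not part of the original driver): address_align_mask is
 * derived from the widest interface in axi_dmac_adjust_chan_params(), so a
 * channel whose widest port is 8 bytes (64 bit) has address_align_mask == 0x7.
 * An address such as 0x1004 then fails axi_dmac_check_addr() because
 * 0x1004 & 0x7 == 0x4, while 0x1008 passes.
 */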

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	if (chan->next_desc) {
		desc = chan->next_desc;
	} else {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}

	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers, there is no callback to
	 * call and there is only a single segment, enable hw cyclic mode to
	 * avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
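
/*
 * Illustrative example (not part of the original driver): for a plain 1D
 * segment of 4096 bytes the writes above program X_LENGTH = 4096 - 1 = 0xfff
 * and Y_LENGTH = 1 - 1 = 0, i.e. both length registers hold "length minus
 * one". A 2D segment with x_len = 1024, y_len = 8 and dest_stride = 4096
 * would store eight 1024-byte rows whose start addresses are 4096 bytes
 * apart in destination memory.
 */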

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	return sg->x_len * sg->y_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg)
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		else
			dev_warn(dmac->dma_dev.dev,
				"Partial segment id=%u, len=%u not found\n",
				id, len);

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
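	/*
	 * Illustrative example (not part of the original driver): with
	 * period_len = 10000, max_length = 4096 and length_align_mask = 0x7
	 * the code above yields num_segments = 3 and segment_size = 3334,
	 * rounded up to the next multiple of 8, i.e. 3336. Each period is
	 * then emitted below as two 3336-byte segments plus a 3328-byte
	 * remainder.
	 */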

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
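
/*
 * Illustrative client-side sketch (not part of the original driver): a
 * peripheral driver reaches the prep callbacks above through the generic
 * dmaengine API, roughly along these lines (error handling omitted; "rx" and
 * my_period_done() are placeholder names):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_done;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */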

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
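
/*
 * Illustrative devicetree fragment (not part of the original driver), using
 * the property names parsed below and the bus type macros from
 * <dt-bindings/dma/axi-dmac.h>; addresses, interrupt and clock specifiers
 * are placeholders:
 *
 *	dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x1000>;
 *		#dma-cells = <1>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *
 *		adi,channels {
 *			#size-cells = <0>;
 *			#address-cells = <1>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			};
 *		};
 *	};
 */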

static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;
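	/*
	 * Illustrative note (not part of the original driver): a field value
	 * of 3 therefore means 1 << 3 = 8 bytes, i.e. a 64-bit source bus.
	 */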

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;
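	/*
	 * Illustrative note (not part of the original driver): the write of
	 * 0xffffffff above only sticks in the bits the core implements, so a
	 * core with a 24-bit length register reads back 0x00ffffff and
	 * max_length becomes 0x01000000 bytes after the increment.
	 */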

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);
	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");