/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
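
/*
 * Usage sketch (hypothetical consumer, not part of this driver): a client
 * peripheral driver talks to this controller purely through the generic
 * dmaengine API. The channel lookup name "rx" and the callback name below
 * are assumptions made up for illustration.
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_period_done;	(my_period_done is hypothetical)
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */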
#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
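
/*
 * Transfer-queue protocol, as used by the code below: reading
 * AXI_DMAC_REG_START_TRANSFER returns non-zero while the hardware queue is
 * full, reading AXI_DMAC_REG_TRANSFER_ID returns the ID that the next queued
 * transfer will get, and writing 1 to AXI_DMAC_REG_START_TRANSFER queues the
 * transfer described by the address/stride/length registers. Completed
 * transfers are reported in AXI_DMAC_REG_TRANSFER_DONE as a bitmap indexed
 * by transfer ID.
 */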
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}
static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}
static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0 || len > chan->max_length)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;
	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs)
		chan->next_desc = NULL;
	else
		chan->next_desc = desc;

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}
static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return;

	if (active->cyclic) {
		vchan_cyclic_callback(&active->vdesc);
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (!(BIT(sg->id) & completed_transfers))
				break;
			active->num_completed++;
			if (active->num_completed == active->num_sgs) {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		} while (active);
	}
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if (pending & AXI_DMAC_IRQ_SOT)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;

	desc = kzalloc(sizeof(struct axi_dmac_desc) +
		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;

	return desc;
}
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	desc = axi_dmac_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = sg_dma_address(sg);
		else
			desc->sg[i].src_addr = sg_dma_address(sg);
		desc->sg[i].x_len = sg_dma_len(sg);
		desc->sg[i].y_len = 1;
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, i;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = axi_dmac_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = buf_addr;
		else
			desc->sg[i].src_addr = buf_addr;
		desc->sg[i].x_len = period_len;
		desc->sg[i].y_len = 1;
		buf_addr += period_len;
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    !axi_dmac_check_len(chan, xt->numf))
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}
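
/*
 * Example devicetree fragment (illustrative sketch only; the node layout and
 * the numeric values are assumptions chosen to exercise the properties parsed
 * below, not copied from a real board file):
 *
 *	dma@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x10000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *		#dma-cells = <1>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,destination-bus-width = <64>;
 *				adi,length-width = <24>;
 *				adi,cyclic;
 *			};
 *		};
 *	};
 */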
/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
	if (ret)
		return ret;

	if (val >= 32)
		chan->max_length = UINT_MAX;
	else
		chan->max_length = (1ULL << val) - 1;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

	return 0;
}
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq <= 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}
static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}
static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");