/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Given that those are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
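
/*
 * One hardware scatter-gather segment. While a segment is queued in the
 * core, id holds the transfer ID assigned by AXI_DMAC_REG_TRANSFER_ID;
 * before submission and after completion it is set to AXI_DMAC_SG_UNUSED.
 */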
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};
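
/*
 * Per-channel state. next_desc points at the descriptor whose remaining
 * segments should be submitted to the hardware next, while active_descs
 * holds all descriptors that have been handed to the hardware but have not
 * yet completed.
 */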
struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0 || len > chan->max_length)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
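
/*
 * Push the next scatter-gather segment into the core's transfer queue.
 * Must be called with the channel's vchan.lock held; it is invoked from
 * issue_pending and from the interrupt handler whenever a queue slot
 * becomes free. Returns early if the hardware queue is still full.
 */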
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}
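
/*
 * Retire every segment of the active descriptor whose ID is set in
 * completed_transfers. Returns true if a segment that was waiting for a
 * free hardware queue slot should now be scheduled.
 */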
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
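
/*
 * The core raises SOT (start of transfer) when it begins executing a queued
 * transfer, which means a slot in its transfer queue has become free, and
 * EOT (end of transfer) when a transfer completes; TRANSFER_DONE reports
 * which transfer IDs have finished.
 */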
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(sizeof(struct axi_dmac_desc) +
		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	desc = axi_dmac_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = sg_dma_address(sg);
		else
			desc->sg[i].src_addr = sg_dma_address(sg);
		desc->sg[i].x_len = sg_dma_len(sg);
		desc->sg[i].y_len = 1;
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, i;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = axi_dmac_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = buf_addr;
		else
			desc->sg[i].src_addr = buf_addr;
		desc->sg[i].x_len = period_len;
		desc->sg[i].y_len = 1;
		buf_addr += period_len;
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    !axi_dmac_check_len(chan, xt->numf))
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
	if (ret)
		return ret;

	if (val >= 32)
		chan->max_length = UINT_MAX;
	else
		chan->max_length = (1ULL << val) - 1;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");
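
/*
 * A minimal sketch of how a client might drive one of these channels through
 * the generic dmaengine API. The channel name "rx" and the buffer variables
 * are assumptions for illustration, not part of this driver:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	// One callback per period; with no callback and a single period the
 *	// driver falls back to hardware cyclic mode (no per-period IRQs).
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *			period_len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */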