// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
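
/*
 * For illustration, a hypothetical devicetree fragment describing a single
 * device-to-memory channel. The property names match what
 * axi_dmac_parse_chan_dt() below reads; addresses and cell values are
 * placeholders, see the adi,axi-dmac binding for authoritative documentation:
 *
 *	dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x1000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *		#dma-cells = <1>;
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *			};
 *		};
 *	};
 */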
#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
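
/*
 * Hardware transfer IDs index the bits of AXI_DMAC_REG_TRANSFER_DONE, so a
 * finished segment is matched below via BIT(sg->id). AXI_DMAC_SG_UNUSED is
 * simply an out-of-range value (> 31) that marks a segment which is not
 * currently queued in hardware.
 */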
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
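
/*
 * The core queues transfers internally: AXI_DMAC_REG_START_TRANSFER reads
 * back non-zero while the queue is full, in which case we simply return and
 * wait for the next start-of-transfer (SOT) interrupt, which signals that a
 * queue slot has become available.
 */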
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}
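
/*
 * With partial transfer reporting enabled the core reports segments that
 * completed with fewer bytes than programmed as (id, len) pairs through the
 * PARTIAL_XFER_LEN/PARTIAL_XFER_ID registers. The loop below matches each
 * reported id against the currently queued segments and keeps reading until
 * the AXI_DMAC_FLAG_PARTIAL_XFER_DONE bit in AXI_DMAC_REG_TRANSFER_DONE is no
 * longer set, i.e. no more reports are pending.
 */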
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Not found partial segment id=%u, len=%u\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
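
/*
 * The interrupt line is requested with IRQF_SHARED in probe, so the handler
 * must be able to tell whether this device raised the interrupt: a zero
 * IRQ_PENDING value means some other device on the shared line was the
 * source and IRQ_NONE is returned.
 */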
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
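
/*
 * GFP_NOWAIT is used for the descriptor allocation because the dmaengine
 * prep callbacks that call this may be invoked from contexts where sleeping
 * is not allowed.
 */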
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
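	/*
	 * The expression above rounds segment_size up to the next multiple
	 * of the length alignment. E.g. with length_align_mask == 0x3
	 * (4-byte aligned lengths) and segment_size == 1021:
	 * ((1021 - 1) | 0x3) + 1 == (0x3fc | 0x3) + 1 == 0x3ff + 1 == 1024.
	 */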
	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
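
/*
 * For illustration, a minimal (hypothetical) client sketch for a cyclic
 * device-to-memory transfer through the generic dmaengine API; the channel
 * name "rx", the callback and the buffer parameters are placeholders:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;	// invoked once per completed period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */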
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
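
/*
 * In summary, the interleaved template maps onto the hardware as follows: on
 * 2D-capable cores one frame row becomes X_LENGTH, the frame count becomes
 * Y_LENGTH and the row size plus inter-chunk gap becomes the stride; on
 * cores without 2D support the frames must be contiguous (no ICG) and are
 * flattened into a single 1D transfer.
 */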
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}
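
/*
 * Several instantiation-time options are not described in the devicetree and
 * are instead probed at runtime below by writing a register and reading the
 * value back: bits belonging to an unimplemented feature read back as zero
 * (e.g. FLAG_CYCLIC, Y_LENGTH), and the writable bits of X_LENGTH reveal
 * both the maximum transfer length and the required length alignment.
 */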
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int version;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	ret = axi_dmac_detect_caps(dmac);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		 &axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}
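
/*
 * Teardown runs in roughly the reverse order of probe. tasklet_kill() on the
 * vchan tasklet runs after free_irq(), so once it returns no completion work
 * is running or can be scheduled and the DMA device can be unregistered
 * safely.
 */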
static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}
static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");