/*
 * MOXA ART SoCs DMA Engine support.
 *
 * Copyright (C) 2013 Jonas Jensen
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/bitops.h>

#include <asm/cacheflush.h>

#include "dmaengine.h"
#define APB_DMA_MAX_CHANNEL			4

/* Per-channel register offsets; one channel occupies REG_OFF_CHAN_SIZE bytes. */
#define REG_OFF_ADDRESS_SOURCE			0
#define REG_OFF_ADDRESS_DEST			4
#define REG_OFF_CYCLES				8
#define REG_OFF_CTRL				12
#define REG_OFF_CHAN_SIZE			16

/* Bits in the per-channel CTRL register. */
#define APB_DMA_ENABLE				BIT(0)
#define APB_DMA_FIN_INT_STS			BIT(1)
#define APB_DMA_FIN_INT_EN			BIT(2)
#define APB_DMA_BURST_MODE			BIT(3)
#define APB_DMA_ERR_INT_STS			BIT(4)
#define APB_DMA_ERR_INT_EN			BIT(5)

/*
 * Bus selection bits for source/destination.
 * NOTE(review): presumably clear = APB, set = AHB — confirm against the
 * MOXA ART datasheet.
 */
#define APB_DMA_SOURCE_SELECT			0x40
#define APB_DMA_DEST_SELECT			0x80

#define APB_DMA_SOURCE				0x100
#define APB_DMA_DEST				0x1000

#define APB_DMA_SOURCE_MASK			0x700
#define APB_DMA_DEST_MASK			0x7000

/*
 * Address increment/decrement per transfer cycle (3-bit fields):
 * 000: no increment
 * 001: +1 (Burst=0), +4  (Burst=1)
 * 010: +2 (Burst=0), +8  (Burst=1)
 * 011: +4 (Burst=0), +16 (Burst=1)
 * 101: -1 (Burst=0), -4  (Burst=1)
 * 110: -2 (Burst=0), -8  (Burst=1)
 * 111: -4 (Burst=0), -16 (Burst=1)
 */
#define APB_DMA_SOURCE_INC_0			0
#define APB_DMA_SOURCE_INC_1_4			0x100
#define APB_DMA_SOURCE_INC_2_8			0x200
#define APB_DMA_SOURCE_INC_4_16			0x300
#define APB_DMA_SOURCE_DEC_1_4			0x500
#define APB_DMA_SOURCE_DEC_2_8			0x600
#define APB_DMA_SOURCE_DEC_4_16			0x700
#define APB_DMA_DEST_INC_0			0
#define APB_DMA_DEST_INC_1_4			0x1000
#define APB_DMA_DEST_INC_2_8			0x2000
#define APB_DMA_DEST_INC_4_16			0x3000
#define APB_DMA_DEST_DEC_1_4			0x5000
#define APB_DMA_DEST_DEC_2_8			0x6000
#define APB_DMA_DEST_DEC_4_16			0x7000

/*
 * Request signal select source/destination address for DMA hardware handshake.
 *
 * The request line number is a property of the DMA controller itself,
 * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
 *
 * 0:    No request / Grant signal
 * 1-15: Request / Grant signal
 */
#define APB_DMA_SOURCE_REQ_NO			0x1000000
#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
#define APB_DMA_DEST_REQ_NO			0x10000
#define APB_DMA_DEST_REQ_NO_MASK		0xf0000

#define APB_DMA_DATA_WIDTH			0x100000
#define APB_DMA_DATA_WIDTH_MASK			0x300000
/*
 * Data width of transfer:
 * 00: word (4 bytes), 01: half-word (2 bytes), 10: byte
 */
#define APB_DMA_DATA_WIDTH_4			0
#define APB_DMA_DATA_WIDTH_2			0x100000
#define APB_DMA_DATA_WIDTH_1			0x200000

#define APB_DMA_CYCLES_MASK			0x00ffffff

/* Element-size selectors, indices into es_bytes[]. */
#define MOXART_DMA_DATA_TYPE_S8			0x00
#define MOXART_DMA_DATA_TYPE_S16		0x01
#define MOXART_DMA_DATA_TYPE_S32		0x02
124 enum dma_transfer_direction dma_dir
;
127 unsigned int dma_cycles
;
128 struct virt_dma_desc vd
;
130 struct moxart_sg sg
[];
134 struct virt_dma_chan vc
;
137 struct moxart_desc
*desc
;
139 struct dma_slave_config cfg
;
144 unsigned int line_reqno
;
148 struct moxart_dmadev
{
149 struct dma_device dma_slave
;
150 struct moxart_chan slave_chans
[APB_DMA_MAX_CHANNEL
];
/* Pairing of controller and OF phandle args used during channel lookup. */
struct moxart_filter_data {
	struct moxart_dmadev	*mdc;
	struct of_phandle_args	*dma_spec;
};
159 static const unsigned int es_bytes
[] = {
160 [MOXART_DMA_DATA_TYPE_S8
] = 1,
161 [MOXART_DMA_DATA_TYPE_S16
] = 2,
162 [MOXART_DMA_DATA_TYPE_S32
] = 4,
165 static struct device
*chan2dev(struct dma_chan
*chan
)
167 return &chan
->dev
->device
;
170 static inline struct moxart_chan
*to_moxart_dma_chan(struct dma_chan
*c
)
172 return container_of(c
, struct moxart_chan
, vc
.chan
);
175 static inline struct moxart_desc
*to_moxart_dma_desc(
176 struct dma_async_tx_descriptor
*t
)
178 return container_of(t
, struct moxart_desc
, vd
.tx
);
181 static void moxart_dma_desc_free(struct virt_dma_desc
*vd
)
183 kfree(container_of(vd
, struct moxart_desc
, vd
));
186 static int moxart_terminate_all(struct dma_chan
*chan
)
188 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
193 dev_dbg(chan2dev(chan
), "%s: ch=%p\n", __func__
, ch
);
195 spin_lock_irqsave(&ch
->vc
.lock
, flags
);
198 moxart_dma_desc_free(&ch
->desc
->vd
);
202 ctrl
= readl(ch
->base
+ REG_OFF_CTRL
);
203 ctrl
&= ~(APB_DMA_ENABLE
| APB_DMA_FIN_INT_EN
| APB_DMA_ERR_INT_EN
);
204 writel(ctrl
, ch
->base
+ REG_OFF_CTRL
);
206 vchan_get_all_descriptors(&ch
->vc
, &head
);
207 spin_unlock_irqrestore(&ch
->vc
.lock
, flags
);
208 vchan_dma_desc_free_list(&ch
->vc
, &head
);
213 static int moxart_slave_config(struct dma_chan
*chan
,
214 struct dma_slave_config
*cfg
)
216 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
221 ctrl
= readl(ch
->base
+ REG_OFF_CTRL
);
222 ctrl
|= APB_DMA_BURST_MODE
;
223 ctrl
&= ~(APB_DMA_DEST_MASK
| APB_DMA_SOURCE_MASK
);
224 ctrl
&= ~(APB_DMA_DEST_REQ_NO_MASK
| APB_DMA_SOURCE_REQ_NO_MASK
);
226 switch (ch
->cfg
.src_addr_width
) {
227 case DMA_SLAVE_BUSWIDTH_1_BYTE
:
228 ctrl
|= APB_DMA_DATA_WIDTH_1
;
229 if (ch
->cfg
.direction
!= DMA_MEM_TO_DEV
)
230 ctrl
|= APB_DMA_DEST_INC_1_4
;
232 ctrl
|= APB_DMA_SOURCE_INC_1_4
;
234 case DMA_SLAVE_BUSWIDTH_2_BYTES
:
235 ctrl
|= APB_DMA_DATA_WIDTH_2
;
236 if (ch
->cfg
.direction
!= DMA_MEM_TO_DEV
)
237 ctrl
|= APB_DMA_DEST_INC_2_8
;
239 ctrl
|= APB_DMA_SOURCE_INC_2_8
;
241 case DMA_SLAVE_BUSWIDTH_4_BYTES
:
242 ctrl
&= ~APB_DMA_DATA_WIDTH
;
243 if (ch
->cfg
.direction
!= DMA_MEM_TO_DEV
)
244 ctrl
|= APB_DMA_DEST_INC_4_16
;
246 ctrl
|= APB_DMA_SOURCE_INC_4_16
;
252 if (ch
->cfg
.direction
== DMA_MEM_TO_DEV
) {
253 ctrl
&= ~APB_DMA_DEST_SELECT
;
254 ctrl
|= APB_DMA_SOURCE_SELECT
;
255 ctrl
|= (ch
->line_reqno
<< 16 &
256 APB_DMA_DEST_REQ_NO_MASK
);
258 ctrl
|= APB_DMA_DEST_SELECT
;
259 ctrl
&= ~APB_DMA_SOURCE_SELECT
;
260 ctrl
|= (ch
->line_reqno
<< 24 &
261 APB_DMA_SOURCE_REQ_NO_MASK
);
264 writel(ctrl
, ch
->base
+ REG_OFF_CTRL
);
269 static struct dma_async_tx_descriptor
*moxart_prep_slave_sg(
270 struct dma_chan
*chan
, struct scatterlist
*sgl
,
271 unsigned int sg_len
, enum dma_transfer_direction dir
,
272 unsigned long tx_flags
, void *context
)
274 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
275 struct moxart_desc
*d
;
276 enum dma_slave_buswidth dev_width
;
278 struct scatterlist
*sgent
;
282 if (!is_slave_direction(dir
)) {
283 dev_err(chan2dev(chan
), "%s: invalid DMA direction\n",
288 if (dir
== DMA_DEV_TO_MEM
) {
289 dev_addr
= ch
->cfg
.src_addr
;
290 dev_width
= ch
->cfg
.src_addr_width
;
292 dev_addr
= ch
->cfg
.dst_addr
;
293 dev_width
= ch
->cfg
.dst_addr_width
;
297 case DMA_SLAVE_BUSWIDTH_1_BYTE
:
298 es
= MOXART_DMA_DATA_TYPE_S8
;
300 case DMA_SLAVE_BUSWIDTH_2_BYTES
:
301 es
= MOXART_DMA_DATA_TYPE_S16
;
303 case DMA_SLAVE_BUSWIDTH_4_BYTES
:
304 es
= MOXART_DMA_DATA_TYPE_S32
;
307 dev_err(chan2dev(chan
), "%s: unsupported data width (%u)\n",
308 __func__
, dev_width
);
312 d
= kzalloc(struct_size(d
, sg
, sg_len
), GFP_ATOMIC
);
317 d
->dev_addr
= dev_addr
;
320 for_each_sg(sgl
, sgent
, sg_len
, i
) {
321 d
->sg
[i
].addr
= sg_dma_address(sgent
);
322 d
->sg
[i
].len
= sg_dma_len(sgent
);
329 return vchan_tx_prep(&ch
->vc
, &d
->vd
, tx_flags
);
332 static struct dma_chan
*moxart_of_xlate(struct of_phandle_args
*dma_spec
,
333 struct of_dma
*ofdma
)
335 struct moxart_dmadev
*mdc
= ofdma
->of_dma_data
;
336 struct dma_chan
*chan
;
337 struct moxart_chan
*ch
;
339 chan
= dma_get_any_slave_channel(&mdc
->dma_slave
);
343 ch
= to_moxart_dma_chan(chan
);
344 ch
->line_reqno
= dma_spec
->args
[0];
349 static int moxart_alloc_chan_resources(struct dma_chan
*chan
)
351 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
353 dev_dbg(chan2dev(chan
), "%s: allocating channel #%u\n",
354 __func__
, ch
->ch_num
);
360 static void moxart_free_chan_resources(struct dma_chan
*chan
)
362 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
364 vchan_free_chan_resources(&ch
->vc
);
366 dev_dbg(chan2dev(chan
), "%s: freeing channel #%u\n",
367 __func__
, ch
->ch_num
);
371 static void moxart_dma_set_params(struct moxart_chan
*ch
, dma_addr_t src_addr
,
374 writel(src_addr
, ch
->base
+ REG_OFF_ADDRESS_SOURCE
);
375 writel(dst_addr
, ch
->base
+ REG_OFF_ADDRESS_DEST
);
378 static void moxart_set_transfer_params(struct moxart_chan
*ch
, unsigned int len
)
380 struct moxart_desc
*d
= ch
->desc
;
381 unsigned int sglen_div
= es_bytes
[d
->es
];
383 d
->dma_cycles
= len
>> sglen_div
;
386 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
387 * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ).
389 writel(d
->dma_cycles
, ch
->base
+ REG_OFF_CYCLES
);
391 dev_dbg(chan2dev(&ch
->vc
.chan
), "%s: set %u DMA cycles (len=%u)\n",
392 __func__
, d
->dma_cycles
, len
);
395 static void moxart_start_dma(struct moxart_chan
*ch
)
399 ctrl
= readl(ch
->base
+ REG_OFF_CTRL
);
400 ctrl
|= (APB_DMA_ENABLE
| APB_DMA_FIN_INT_EN
| APB_DMA_ERR_INT_EN
);
401 writel(ctrl
, ch
->base
+ REG_OFF_CTRL
);
404 static void moxart_dma_start_sg(struct moxart_chan
*ch
, unsigned int idx
)
406 struct moxart_desc
*d
= ch
->desc
;
407 struct moxart_sg
*sg
= ch
->desc
->sg
+ idx
;
409 if (ch
->desc
->dma_dir
== DMA_MEM_TO_DEV
)
410 moxart_dma_set_params(ch
, sg
->addr
, d
->dev_addr
);
411 else if (ch
->desc
->dma_dir
== DMA_DEV_TO_MEM
)
412 moxart_dma_set_params(ch
, d
->dev_addr
, sg
->addr
);
414 moxart_set_transfer_params(ch
, sg
->len
);
416 moxart_start_dma(ch
);
419 static void moxart_dma_start_desc(struct dma_chan
*chan
)
421 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
422 struct virt_dma_desc
*vd
;
424 vd
= vchan_next_desc(&ch
->vc
);
433 ch
->desc
= to_moxart_dma_desc(&vd
->tx
);
436 moxart_dma_start_sg(ch
, 0);
439 static void moxart_issue_pending(struct dma_chan
*chan
)
441 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
444 spin_lock_irqsave(&ch
->vc
.lock
, flags
);
445 if (vchan_issue_pending(&ch
->vc
) && !ch
->desc
)
446 moxart_dma_start_desc(chan
);
447 spin_unlock_irqrestore(&ch
->vc
.lock
, flags
);
450 static size_t moxart_dma_desc_size(struct moxart_desc
*d
,
451 unsigned int completed_sgs
)
456 for (size
= i
= completed_sgs
; i
< d
->sglen
; i
++)
457 size
+= d
->sg
[i
].len
;
462 static size_t moxart_dma_desc_size_in_flight(struct moxart_chan
*ch
)
465 unsigned int completed_cycles
, cycles
;
467 size
= moxart_dma_desc_size(ch
->desc
, ch
->sgidx
);
468 cycles
= readl(ch
->base
+ REG_OFF_CYCLES
);
469 completed_cycles
= (ch
->desc
->dma_cycles
- cycles
);
470 size
-= completed_cycles
<< es_bytes
[ch
->desc
->es
];
472 dev_dbg(chan2dev(&ch
->vc
.chan
), "%s: size=%zu\n", __func__
, size
);
477 static enum dma_status
moxart_tx_status(struct dma_chan
*chan
,
479 struct dma_tx_state
*txstate
)
481 struct moxart_chan
*ch
= to_moxart_dma_chan(chan
);
482 struct virt_dma_desc
*vd
;
483 struct moxart_desc
*d
;
488 * dma_cookie_status() assigns initial residue value.
490 ret
= dma_cookie_status(chan
, cookie
, txstate
);
492 spin_lock_irqsave(&ch
->vc
.lock
, flags
);
493 vd
= vchan_find_desc(&ch
->vc
, cookie
);
495 d
= to_moxart_dma_desc(&vd
->tx
);
496 txstate
->residue
= moxart_dma_desc_size(d
, 0);
497 } else if (ch
->desc
&& ch
->desc
->vd
.tx
.cookie
== cookie
) {
498 txstate
->residue
= moxart_dma_desc_size_in_flight(ch
);
500 spin_unlock_irqrestore(&ch
->vc
.lock
, flags
);
508 static void moxart_dma_init(struct dma_device
*dma
, struct device
*dev
)
510 dma
->device_prep_slave_sg
= moxart_prep_slave_sg
;
511 dma
->device_alloc_chan_resources
= moxart_alloc_chan_resources
;
512 dma
->device_free_chan_resources
= moxart_free_chan_resources
;
513 dma
->device_issue_pending
= moxart_issue_pending
;
514 dma
->device_tx_status
= moxart_tx_status
;
515 dma
->device_config
= moxart_slave_config
;
516 dma
->device_terminate_all
= moxart_terminate_all
;
519 INIT_LIST_HEAD(&dma
->channels
);
522 static irqreturn_t
moxart_dma_interrupt(int irq
, void *devid
)
524 struct moxart_dmadev
*mc
= devid
;
525 struct moxart_chan
*ch
= &mc
->slave_chans
[0];
529 dev_dbg(chan2dev(&ch
->vc
.chan
), "%s\n", __func__
);
531 for (i
= 0; i
< APB_DMA_MAX_CHANNEL
; i
++, ch
++) {
535 ctrl
= readl(ch
->base
+ REG_OFF_CTRL
);
537 dev_dbg(chan2dev(&ch
->vc
.chan
), "%s: ch=%p ch->base=%p ctrl=%x\n",
538 __func__
, ch
, ch
->base
, ctrl
);
540 if (ctrl
& APB_DMA_FIN_INT_STS
) {
541 ctrl
&= ~APB_DMA_FIN_INT_STS
;
543 spin_lock(&ch
->vc
.lock
);
544 if (++ch
->sgidx
< ch
->desc
->sglen
) {
545 moxart_dma_start_sg(ch
, ch
->sgidx
);
547 vchan_cookie_complete(&ch
->desc
->vd
);
548 moxart_dma_start_desc(&ch
->vc
.chan
);
550 spin_unlock(&ch
->vc
.lock
);
554 if (ctrl
& APB_DMA_ERR_INT_STS
) {
555 ctrl
&= ~APB_DMA_ERR_INT_STS
;
559 writel(ctrl
, ch
->base
+ REG_OFF_CTRL
);
565 static int moxart_probe(struct platform_device
*pdev
)
567 struct device
*dev
= &pdev
->dev
;
568 struct device_node
*node
= dev
->of_node
;
569 struct resource
*res
;
570 void __iomem
*dma_base_addr
;
573 struct moxart_chan
*ch
;
574 struct moxart_dmadev
*mdc
;
576 mdc
= devm_kzalloc(dev
, sizeof(*mdc
), GFP_KERNEL
);
580 irq
= irq_of_parse_and_map(node
, 0);
582 dev_err(dev
, "no IRQ resource\n");
586 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
587 dma_base_addr
= devm_ioremap_resource(dev
, res
);
588 if (IS_ERR(dma_base_addr
))
589 return PTR_ERR(dma_base_addr
);
591 dma_cap_zero(mdc
->dma_slave
.cap_mask
);
592 dma_cap_set(DMA_SLAVE
, mdc
->dma_slave
.cap_mask
);
593 dma_cap_set(DMA_PRIVATE
, mdc
->dma_slave
.cap_mask
);
595 moxart_dma_init(&mdc
->dma_slave
, dev
);
597 ch
= &mdc
->slave_chans
[0];
598 for (i
= 0; i
< APB_DMA_MAX_CHANNEL
; i
++, ch
++) {
600 ch
->base
= dma_base_addr
+ i
* REG_OFF_CHAN_SIZE
;
603 ch
->vc
.desc_free
= moxart_dma_desc_free
;
604 vchan_init(&ch
->vc
, &mdc
->dma_slave
);
606 dev_dbg(dev
, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
607 __func__
, i
, ch
->ch_num
, ch
->base
);
610 platform_set_drvdata(pdev
, mdc
);
612 ret
= devm_request_irq(dev
, irq
, moxart_dma_interrupt
, 0,
613 "moxart-dma-engine", mdc
);
615 dev_err(dev
, "devm_request_irq failed\n");
620 ret
= dma_async_device_register(&mdc
->dma_slave
);
622 dev_err(dev
, "dma_async_device_register failed\n");
626 ret
= of_dma_controller_register(node
, moxart_of_xlate
, mdc
);
628 dev_err(dev
, "of_dma_controller_register failed\n");
629 dma_async_device_unregister(&mdc
->dma_slave
);
633 dev_dbg(dev
, "%s: IRQ=%u\n", __func__
, irq
);
638 static int moxart_remove(struct platform_device
*pdev
)
640 struct moxart_dmadev
*m
= platform_get_drvdata(pdev
);
642 devm_free_irq(&pdev
->dev
, m
->irq
, m
);
644 dma_async_device_unregister(&m
->dma_slave
);
646 if (pdev
->dev
.of_node
)
647 of_dma_controller_free(pdev
->dev
.of_node
);
652 static const struct of_device_id moxart_dma_match
[] = {
653 { .compatible
= "moxa,moxart-dma" },
656 MODULE_DEVICE_TABLE(of
, moxart_dma_match
);
658 static struct platform_driver moxart_driver
= {
659 .probe
= moxart_probe
,
660 .remove
= moxart_remove
,
662 .name
= "moxart-dma-engine",
663 .of_match_table
= moxart_dma_match
,
667 static int moxart_init(void)
669 return platform_driver_register(&moxart_driver
);
671 subsys_initcall(moxart_init
);
673 static void __exit
moxart_exit(void)
675 platform_driver_unregister(&moxart_driver
);
677 module_exit(moxart_exit
);
679 MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
680 MODULE_DESCRIPTION("MOXART DMA engine driver");
681 MODULE_LICENSE("GPL v2");