// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include "virt-dma.h"
#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C
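
/*
 * Illustration (not part of the register map itself): each physical channel
 * occupies a 0x100-byte window starting at OWL_DMA_CHAN_BASE(i), and the
 * OWL_DMAX_* offsets above are relative to that window. For example, the
 * MODE register of channel 2 lives at 0x100 + 2 * 0x100 + 0x00 = 0x300 from
 * the controller base, which is what pchan_readl(pchan, OWL_DMAX_MODE) ends
 * up accessing through pchan->base.
 */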

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define	OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define	OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define	OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define	OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define	OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define	OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define	OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define	OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define	OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define	OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define	OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define	OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define	OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define	OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)
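
/*
 * Example (illustrative only): the mode word used for a plain
 * memory-to-memory copy in owl_dma_cfg_lli() below is composed roughly as
 *
 *	mode = OWL_DMA_MODE_PW(0) | OWL_DMA_MODE_TS(0) |
 *	       OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DCU |
 *	       OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_INC;
 *
 * i.e. both source and destination go through the DCU port with incrementing
 * addresses and the default bus width.
 */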

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define	OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define	OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define	OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define	OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define	OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define	OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)

/* Pack shift and newshift in a single word */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
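
/*
 * Worked example (illustrative only): BIT_FIELD(mode, 8, 16, 20) takes the
 * 8-bit field found at bits [23:16] of 'mode' and re-positions it at bits
 * [27:20] of the result. llc_hw_ctrla() below uses a series of such
 * extractions to repack OWL_DMAX_MODE/LINKLIST_CTL style values into the
 * ctrla layout expected by the hardware link list descriptor.
 */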

/**
 * struct owl_dma_lli_hw - Hardware link list for dma transfer
 * @next_lli: physical address of the next link list
 * @saddr: source physical address
 * @daddr: destination physical address
 * @flen: frame length
 * @fcnt: frame count
 * @src_stride: source stride
 * @dst_stride: destination stride
 * @ctrla: dma_mode and linklist ctrl config
 * @ctrlb: interrupt config
 * @const_num: data for constant fill
 */
struct owl_dma_lli_hw {
	u32	next_lli;
	u32	saddr;
	u32	daddr;
	u32	flen:20;
	u32	fcnt:12;
	u32	src_stride;
	u32	dst_stride;
	u32	ctrla;
	u32	ctrlb;
	u32	const_num;
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	struct owl_dma_lli_hw	hw;
	dma_addr_t		phys;
	struct list_head	node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 */
struct owl_dma_txd {
	struct virt_dma_desc	vd;
	struct list_head	lli_list;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32			id;
	void __iomem		*base;
	struct owl_dma_vchan	*vchan;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan	vc;
	struct owl_dma_pchan	*pchan;
	struct owl_dma_txd	*txd;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing the DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
	struct dma_device	dma;
	void __iomem		*base;
	struct clk		*clk;
	spinlock_t		lock;
	struct dma_pool		*lli_pool;
	int			irq;

	unsigned int		nr_pchans;
	struct owl_dma_pchan	*pchans;

	unsigned int		nr_vchans;
	struct owl_dma_vchan	*vchans;
};

static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}
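
/*
 * Illustration (derived from the BIT_FIELD() calls above, not authoritative):
 * llc_hw_ctrla() moves mode[31:28] -> ctrla[31:28], mode[23:16] -> ctrla[27:20],
 * mode[11:8] -> ctrla[19:16], mode[5:0] -> ctrla[15:10], and llc_ctl[11:10] ->
 * ctrla[9:8], llc_ctl[9:8] -> ctrla[7:6]. llc_hw_ctrlb() likewise moves the
 * seven interrupt-control bits int_ctl[6:0] up to ctrlb[24:18].
 */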

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next)
{
	list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw.next_lli = next->phys;
		prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir)
{
	struct owl_dma_lli_hw *hw = &lli->hw;
	u32 mode;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	default:
		return -EINVAL;
	}

	hw->next_lli = 0; /* One link list by default */
	hw->saddr = src;
	hw->daddr = dst;

	hw->fcnt = 1; /* Frame count fixed as 1 */
	hw->flen = len; /* Max frame length is 1MB */
	hw->src_stride = 0;
	hw->dst_stride = 0;
	hw->ctrla = llc_hw_ctrla(mode,
				 OWL_DMA_LLC_SAV_LOAD_NEXT |
				 OWL_DMA_LLC_DAV_LOAD_NEXT);

	hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	return 0;
}
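
/*
 * Example (illustrative only): a memcpy larger than OWL_DMA_FRAME_MAX_LENGTH
 * is split by owl_dma_prep_memcpy() below into several llis, each configured
 * here as a single frame (fcnt = 1) of up to 0xfffff bytes. owl_dma_add_lli()
 * then chains them by pointing prev->hw.next_lli at the next node's dma_pool
 * address and setting the link-list-mode enable in ctrla, so the hardware can
 * walk the whole chain from OWL_DMAX_NEXT_DESCRIPTOR on its own.
 */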

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);
	}

	return pchan;
}

static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);
	vchan_dma_desc_free_list(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remain count of current node in link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += lli->hw.flen;
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += lli->hw.flen;
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
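
/*
 * Residue reporting (illustrative summary of the code above): if the cookie
 * still sits in the virtual channel's descriptor list, the residue is simply
 * the sum of the flen of every lli in that descriptor, since nothing has been
 * transferred yet. If the descriptor is the one currently running on a
 * physical channel, owl_dma_getbytes_chan() instead reads OWL_DMAX_REMAIN_CNT
 * for the active node and adds the frame lengths of the remaining nodes.
 */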

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer as frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}
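
/*
 * Hypothetical client usage (illustrative only, not part of this driver):
 * a consumer would grab a memcpy-capable channel, e.g. with
 * dma_request_chan_by_mask() and DMA_MEMCPY set in the mask, then do roughly
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, flags);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * which reaches this driver through device_prep_dma_memcpy (this function),
 * device_issue_pending and the interrupt handler above. The dst_dma/src_dma
 * names are placeholders for DMA-mapped addresses owned by the client.
 */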

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	struct resource *res;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for DMA priority feature, we only use 1 IRQ for
	 * simplification.
	 */
	od->irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	return 0;

err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}

static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);
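
/*
 * Illustrative device tree node (assumed values, for orientation only; the
 * actions,s900-dma binding document is authoritative):
 *
 *	dma: dma-controller@... {
 *		compatible = "actions,s900-dma";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *		#dma-cells = <1>;
 *		dma-channels = <12>;
 *		dma-requests = <46>;
 *	};
 *
 * The dma-channels and dma-requests properties are what owl_dma_probe()
 * reads to size the physical and virtual channel arrays.
 */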

static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove	= owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);
966 MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
967 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
968 MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
969 MODULE_LICENSE("GPL");