/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD		BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM		0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR		0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP		BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

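/*
 * Worked example (illustrative only, derived from the two defines above):
 * one hardware descriptor moves at most PDMA_MAX_DESC_BYTES = 0x1ff8 = 8184
 * bytes (PXA_DCMD_LENGTH rounded down to an 8 byte boundary), so a 20000
 * byte transfer is split into DIV_ROUND_UP(20000, 8184) = 3 descriptors,
 * plus the extra "updater" descriptor appended by set_updater_desc().
 */
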
struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			*dbgfs_state;
	struct dentry			**dbgfs_chan;
#endif
};

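/*
 * Layout reminder (an illustrative sketch, not a definition): each
 * pxad_desc_sw owns nb_desc coherent pxad_desc_hw blocks allocated from
 * desc_pool, chained through their ddadr fields, the last one being the
 * "updater" built by set_updater_desc():
 *
 *   first -> hw_desc[0] -> hw_desc[1] -> ... -> updater (DDADR_STOP)
 */
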
#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

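/*
 * For example (illustrative arithmetic only): requestor line 2 maps to the
 * DRCMR register at offset 0x100 + 2 * 4 = 0x108, while line 64 maps into
 * the upper bank at 0x1000 + 64 * 4 = 0x1100.
 */
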
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int dbg_show_descriptors(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int dbg_show_chan_state(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int dbg_show_state(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
	.open		= dbg_open_##name, \
	.llseek		= seq_lseek, \
	.read		= seq_read, \
	.release	= single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &dbg_fops_chan_state);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &dbg_fops_descriptors);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&dbg_fops_requester_chan);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &dbg_fops_state);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

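/*
 * How the two helpers above cooperate (a sketch of the reasoning, not
 * extra driver logic): the updater is a 4-byte copy whose source is the
 * start of the updater descriptor itself and whose target is its own
 * dtadr field (dma + 8). As long as it has not run, dtadr still holds
 * dsadr + 8; once the controller executes it, dtadr is overwritten with
 * the copied ddadr word, so is_desc_completed() sees the change.
 */
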
static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear the IRQ */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
		"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);
	/* vchan_issue_pending() moves the submitted list to issued */
	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}

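/*
 * Typical client-side sequence (illustrative only, plain dmaengine API,
 * not code belonging to this driver):
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *
 * status is DMA_ERROR when the cookie matches a recorded bus error,
 * otherwise state.residue is filled in by pxad_residue().
 */
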
static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

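/*
 * Device tree usage sketch (an assumption based on the two-cell specifier
 * consumed above, i.e. <requestor priority>; check the binding document
 * for the authoritative format):
 *
 *	pdma: dma-controller@40000000 {
 *		compatible = "marvell,pdma-1.0";
 *		#dma-cells = <2>;
 *	};
 *
 *	client@0 {
 *		dmas = <&pdma 17 1>, <&pdma 18 1>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * pxad_dma_xlate() then stores args[0] into chan->drcmr and args[1] into
 * chan->prio for the allocated channel.
 */
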
static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dma_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

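/*
 * Legacy (non device-tree) channel request using the filter function above;
 * an illustrative sketch assuming struct pxad_param and the PXAD_PRIO_*
 * constants from <linux/dma/pxa-dma.h>, not code that belongs to this file:
 *
 *	dma_cap_mask_t mask;
 *	struct pxad_param param = {
 *		.prio = PXAD_PRIO_LOWEST,
 *		.drcmr = requestor_line,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
 *						&param, dev, "tx");
 */
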
module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");