/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"
#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))
#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD		BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM		0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR		0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP		BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
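/*
 * A single hardware descriptor can move at most PDMA_MAX_DESC_BYTES:
 * the PXA_DCMD_LENGTH mask rounded down to a multiple of 8
 * (1 << PDMA_ALIGNMENT), so that cutting a large transfer into chunks
 * never breaks the 8-byte alignment of the chunks that follow.
 */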
struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};
struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			*dbgfs_state;
	struct dentry			**dbgfs_chan;
#endif
};
#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}
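/*
 * Note: the split in pxad_drcmr() mirrors the controller's register
 * map, where requestor lines below 64 have their DRCMR register at
 * 0x100 + line * 4 while the higher lines sit in a second bank at
 * 0x1000 + line * 4.
 */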
static bool pxad_filter_fn(struct dma_chan *chan, void *param);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}
static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width = 0;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}
static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);
static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &chan_state_fops);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &descriptors_fops);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&requester_chan_fops);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}
static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &state_fops);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}
static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}
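/*
 * Note: DALGN holds one bit per physical channel; phy_enable() must
 * set it whenever the transfer about to run contains any address that
 * is not a multiple of 8, and clear it otherwise, since the controller
 * handles misaligned accesses only on channels whose DALGN bit is set.
 */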
static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}
static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
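/*
 * How the updater works: 'dma' above is the bus address of the updater
 * descriptor itself, so dsadr points at its own ddadr field (which
 * holds DDADR_STOP) and dtadr at its own dtadr field, 8 bytes further.
 * When the engine executes the updater it copies that single u32 over
 * the dtadr field; is_desc_completed() below simply checks whether
 * dtadr still equals dsadr + 8 to know if the chain ran to its end.
 */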
static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}
static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}
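/*
 * Chaining two transfers is a single pointer update: the last hardware
 * descriptor of vd1 (its updater) gets its ddadr pointed at the first
 * descriptor of vd2, so the engine can flow from one transfer into the
 * next without stopping.
 */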
static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}
static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	bool vd_completed;
	dma_cookie_t last_started = 0;
	unsigned long flags;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
		"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}
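/*
 * Note: when the channel reaches the stop state with descriptors still
 * issued, the handler above relaunches it directly from interrupt
 * context; this is the path through which a chain deliberately broken
 * by pxad_tx_submit() on an alignment change resumes with the new
 * DALGN setting.
 */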
static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}
static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}
*dchan
)
747 struct pxad_chan
*chan
= to_pxad_chan(dchan
);
749 vchan_free_chan_resources(&chan
->vc
);
750 dma_pool_destroy(chan
->desc_pool
);
751 chan
->desc_pool
= NULL
;
753 chan
->drcmr
= U32_MAX
;
754 chan
->prio
= PXAD_PRIO_LOWEST
;
static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}
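/*
 * Note: allocation doubles as chaining: as each hardware descriptor
 * comes out of the dma_pool, its bus address is stored into the
 * previous descriptor's ddadr, so hw_desc[] already forms a valid
 * hardware list; only the tail is terminated later, by
 * set_updater_desc().
 */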
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}
static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}
static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}
static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
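/*
 * Note: every period descriptor carries PXA_DCMD_ENDIRQEN, so the
 * engine raises an end interrupt per period and pxad_chan_handler()
 * turns each one into a vchan_cyclic_callback() for the client.
 */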
static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;

	return 0;
}
static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}
static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}
*dchan
)
1253 struct pxad_chan
*chan
= to_pxad_chan(dchan
);
1255 wait_event(chan
->wq_state
, !is_chan_running(chan
));
1256 vchan_synchronize(&chan
->vc
);
static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}
static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	return 0;
}
static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}
static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);
static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}
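/*
 * A device-tree consumer references a channel with two cells, the
 * requestor line then the priority, along these lines (a sketch only,
 * the phandle and values depend on the platform):
 *
 *	dmas = <&pdma 21 1>;
 *	dma-names = "rx";
 */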
static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}
static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
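/*
 * Legacy (non device-tree) clients select a channel by feeding a
 * struct pxad_param (requestor line plus priority) through the
 * dmaengine filter interface, roughly as below (a sketch; the drcmr
 * and prio values are hypothetical):
 *
 *	struct pxad_param param = {
 *		.drcmr = 21,
 *		.prio = PXAD_PRIO_LOWEST,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
 *						&param, dev, "rx");
 */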
module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");