/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* the xlen and dma_width registers are in units of 4 bytes */
#define SIRFSOC_DMA_WORD_LEN		4
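
/*
 * Note on how this driver addresses the block (derived from the code below):
 * each channel n has its own ADDR/XLEN/YLEN/CTRL registers at
 * (base + n * 0x10 + offset), while the per-channel width registers form an
 * array starting at SIRFSOC_DMA_WIDTH_0 (base + 0x100 + n * 4). The ADDR
 * register is programmed with a 32-bit word address, which is why buffer
 * addresses are written shifted right by 2.
 */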

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
	struct sirfsoc_dma_regs		regs_save;
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || (sdesc && !sdesc->cyclic)) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
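
/*
 * Illustrative only (not part of the original driver): a client with a
 * 32-bit wide peripheral FIFO might configure a channel roughly like this
 * before preparing transfers; "uart_rx_fifo_phys" and the burst value are
 * hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= uart_rx_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * Only the two bus widths (which must be 4 bytes) and src_maxburst (a burst
 * of 4 selects the alternative channel mode) are inspected above.
 */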

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
			<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}
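
/*
 * Worked example for the residue math above (illustrative numbers only):
 * a descriptor with xlen = 3, ylen = 1 and width = 4 describes
 * (3 + 1) * (1 + 1) * (4 * SIRFSOC_DMA_WORD_LEN) = 128 bytes. If CH_ADDR
 * reads back a position 64 bytes past sdesc->addr, the reported residue is
 * 128 - 64 = 64 bytes.
 */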

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
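
/*
 * Illustrative only: for the 2D path above, a mem-to-dev client would fill a
 * single-chunk interleaved template along these lines (buffer address and
 * sizes are hypothetical, in bytes, and should be multiples of
 * SIRFSOC_DMA_WORD_LEN so the word-based xlen/width programming is exact):
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_phys;
 *	xt->numf = 16;			(ylen becomes numf - 1)
 *	xt->frame_size = 1;		(prima2 handles one chunk per frame)
 *	xt->sgl[0].size = 64;		(xlen = size / 4)
 *	xt->sgl[0].icg = 0;		(width = (size + icg) / 4)
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */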

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * we only support cyclic transfers with 2 periods
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of BUFB
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
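
/*
 * Illustrative only: audio-style clients usually obtain this descriptor via
 * the generic helper, with exactly two periods as required above (buffer and
 * period sizes are hypothetical):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 8192, 4096,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * Each half of the buffer (BUFA/BUFB) then raises one interrupt per loop,
 * which is what advances the happened_cyclic/completed_cyclic counters.
 */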

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
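
/*
 * Illustrative only: a non-DT client could grab one specific channel with the
 * exported filter, e.g. request line 12 of the first DMAC (the number is
 * hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 *
 * The identifier encodes dev_id * SIRFSOC_DMA_CHANNELS + channel number, as
 * checked by the filter above.
 */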

#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}
*of_dma_sirfsoc_xlate(struct of_phandle_args
*dma_spec
,
664 struct of_dma
*ofdma
)
666 struct sirfsoc_dma
*sdma
= ofdma
->of_dma_data
;
667 unsigned int request
= dma_spec
->args
[0];
669 if (request
>= SIRFSOC_DMA_CHANNELS
)
672 return dma_get_slave_channel(&sdma
->channels
[request
].chan
);
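
/*
 * Illustrative only: assuming the usual one-cell DMA specifier that this
 * xlate expects (args[0] is the channel number), a client node would look
 * roughly like this (labels and numbers hypothetical):
 *
 *	uart2: serial@b0070000 {
 *		dmas = <&dmac1 6>, <&dmac0 7>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * Requests at or above SIRFSOC_DMA_CHANNELS are rejected.
 */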

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto unreg_dma_dev;
	}

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

unreg_dma_dev:
	dma_async_device_unregister(dma);
free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	of_dma_controller_free(op->dev.of_node);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}

static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}

static int sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static int sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/*
	 * if we were runtime-suspended before, resume to enable clock
	 * before accessing registers
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	/*
	 * The DMA controller loses all registers while suspending,
	 * so we need to save registers for active channels
	 */
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static int sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/* Enable clock before accessing registers */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		writel_relaxed(sdesc->width,
			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		writel_relaxed(sdesc->addr >> 2,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};

static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");