/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"
#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5
/* xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* DMA direction */
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
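/*
 * For example, sirfsoc_dma_tx_status() below derives the total size of a
 * descriptor as
 *	(xlen + 1) * (ylen + 1) * (width * SIRFSOC_DMA_WORD_LEN) bytes,
 * so a descriptor with xlen = 3, ylen = 1 and width = 4 would describe
 * 4 * 2 * 16 = 128 bytes.
 */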
struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};
struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};
struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
	struct sirfsoc_dma_regs		regs_save;
};
#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}
/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}
/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}
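/*
 * For example, with the control word built above, channel 2 running a
 * memory-to-device transfer (dir = 1, as set in prep_interleaved) with
 * mode = 1 (set by sirfsoc_dma_slave_config() when src_maxburst == 4)
 * writes 2 | (1 << SIRFSOC_DMA_MODE_CTRL_BIT) |
 * (1 << SIRFSOC_DMA_DIR_CTRL_BIT) = 0x32 to its CH_CTRL register.
 */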
/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}
/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);

				schan->completed_cyclic++;
			}
		}
	}
}
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}
static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
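/*
 * A client normally reaches the above through dmaengine_slave_config().
 * Minimal client-side sketch (illustrative only; only 4-byte bus widths
 * are accepted by this driver, and src_maxburst == 4 selects mode 1):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *		.dst_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */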
static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}
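/*
 * Clients do not call this directly; the dmaengine core routes
 * dmaengine_pause(chan), dmaengine_resume(chan),
 * dmaengine_terminate_all(chan) and dmaengine_slave_config(chan, &cfg)
 * to the DMA_PAUSE, DMA_RESUME, DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG
 * commands handled above.
 */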
/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}
/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move descriptors to a local list to free them outside the lock */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}
/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}
/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
		node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
		<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}
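/*
 * A client polls this through dmaengine_tx_status(), e.g. (sketch only;
 * chan and cookie come from the client's own prepare/submit path):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status = dmaengine_tx_status(chan, cookie, &state);
 *
 * state.residue then holds the bytes left, computed above from the
 * hardware address counter (CH_ADDR counts words, hence the << 2).
 */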
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
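/*
 * Usage sketch (client side, illustrative values only; buf_phys, chan and
 * desc are placeholders): the function above accepts exactly one chunk per
 * frame and at least one frame, so a mem-to-dev transfer of 16 lines of
 * 64 bytes with no inter-line gap could be prepared as:
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->src_start	= buf_phys;
 *	xt->dir		= DMA_MEM_TO_DEV;
 *	xt->frame_size	= 1;
 *	xt->numf	= 16;		// -> ylen = 15
 *	xt->sgl[0].size	= 64;		// -> xlen = 16 words
 *	xt->sgl[0].icg	= 0;		// -> width = 16 words
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */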
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with 2 periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. The DMA controller generates interrupts twice in each
	 * loop: when the DMA address reaches the end of BUFA and the end of
	 * BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
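/*
 * Usage sketch (client side, illustrative values only): because of the
 * BUFA/BUFB scheme described above, buf_len must be exactly twice
 * period_len, e.g.:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 8192, 4096,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * The completion callback then fires once per half-buffer, tracked by
 * happened_cyclic/completed_cyclic.
 */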
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
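/*
 * Usage sketch (client side, illustrative channel number): a peripheral
 * driver selects its dedicated channel by global channel id, e.g. for
 * channel 12 of DMAC 0:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */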
#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}
static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}
static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}
static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}
static int sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
static int sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/*
	 * if we were runtime-suspended before, resume to enable clock
	 * before accessing registers
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	/*
	 * The DMA controller loses all registers while suspended,
	 * so we need to save registers for active channels
	 */
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
static int sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/* Enable clock before accessing registers */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		writel_relaxed(sdesc->width,
			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		writel_relaxed(sdesc->addr >> 2,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};
static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.pm		= &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};
static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}
static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}
subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");