/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"
#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5
/* The xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4
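/*
 * Register layout, as used throughout this driver: each channel owns a
 * 0x10-byte window of ADDR/XLEN/YLEN/CTRL registers at base + cid * 0x10,
 * while the per-channel width registers form an array of 32-bit words
 * starting at SIRFSOC_DMA_WIDTH_0 (i.e. base + 0x100 + cid * 4). See
 * sirfsoc_dma_execute() below for the actual programming sequence.
 */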
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* DMA direction */
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};
struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};
struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
	struct sirfsoc_dma_regs		regs_save;
};
#define DRV_NAME	"sirfsoc_dma"
static int sirfsoc_dma_runtime_suspend(struct device *dev);
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}
/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}
/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * it again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}
/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}
/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}
static int sirfsoc_dma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move descriptors to a temporary list */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}
/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}
/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
		node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
		<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frame - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
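
/*
 * Illustrative (hypothetical) client usage of the interleaved API above,
 * not part of this driver: a peripheral driver that already holds a channel
 * could describe a 2D transfer with one chunk per frame as sketched below.
 * Names such as "chan" and "buf_phys" are placeholders.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	xt->src_start = buf_phys;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->numf = 16;			(ylen becomes numf - 1)
 *	xt->frame_size = 1;		(prima2 supports one chunk per frame)
 *	xt->sgl[0].size = 64;		(xlen = size / SIRFSOC_DMA_WORD_LEN)
 *	xt->sgl[0].icg = 0;		(width = (size + icg) / SIRFSOC_DMA_WORD_LEN)
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	if (tx)
 *		dmaengine_submit(tx);
 */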
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * we only support cyclic transfers with two periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
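
/*
 * Illustrative (hypothetical) client usage of the cyclic API above, not part
 * of this driver: the loop buffer must consist of exactly two periods (BUFA
 * and BUFB), so buf_len has to be 2 * period_len. "chan", "buf_phys" and
 * "period_len" are placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, 2 * period_len,
 *				       period_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (!IS_ERR_OR_NULL(tx))
 *		dmaengine_submit(tx);
 */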
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
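
/*
 * Illustrative (hypothetical) client usage of the filter above, not part of
 * this driver: a consumer that knows its global request line number (here 12,
 * a placeholder) could grab the matching channel like this.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */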
#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= SIRFSOC_DMA_CHANNELS)
		return NULL;

	return dma_get_slave_channel(&sdma->channels[request].chan);
}
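
/*
 * With the of_dma xlate above registered in probe(), device-tree consumers do
 * not need the filter function: the single DMA specifier cell selects the
 * channel, and a client can simply call (illustrative only, "dev" and the
 * "rx" request name are placeholders):
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 */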
static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;
	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}
	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}
->device_alloc_chan_resources
= sirfsoc_dma_alloc_chan_resources
;
708 dma
->device_free_chan_resources
= sirfsoc_dma_free_chan_resources
;
709 dma
->device_issue_pending
= sirfsoc_dma_issue_pending
;
710 dma
->device_config
= sirfsoc_dma_slave_config
;
711 dma
->device_pause
= sirfsoc_dma_pause_chan
;
712 dma
->device_resume
= sirfsoc_dma_resume_chan
;
713 dma
->device_terminate_all
= sirfsoc_dma_terminate_all
;
714 dma
->device_tx_status
= sirfsoc_dma_tx_status
;
715 dma
->device_prep_interleaved_dma
= sirfsoc_dma_prep_interleaved
;
716 dma
->device_prep_dma_cyclic
= sirfsoc_dma_prep_cyclic
;
717 dma
->src_addr_widths
= SIRFSOC_DMA_BUSWIDTHS
;
718 dma
->dst_addr_widths
= SIRFSOC_DMA_BUSWIDTHS
;
719 dma
->directions
= BIT(DMA_DEV_TO_MEM
) | BIT(DMA_MEM_TO_DEV
);
721 INIT_LIST_HEAD(&dma
->channels
);
722 dma_cap_set(DMA_SLAVE
, dma
->cap_mask
);
723 dma_cap_set(DMA_CYCLIC
, dma
->cap_mask
);
724 dma_cap_set(DMA_INTERLEAVE
, dma
->cap_mask
);
725 dma_cap_set(DMA_PRIVATE
, dma
->cap_mask
);
	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto unreg_dma_dev;
	}

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

unreg_dma_dev:
	dma_async_device_unregister(dma);
free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}
static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	of_dma_controller_free(op->dev.of_node);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}
static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}
static int sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/*
	 * if we were runtime-suspended before, resume to enable clock
	 * before accessing register
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	/*
	 * DMA controller will lose all registers while suspending
	 * so we need to save registers for active channels
	 */
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
static int sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/* Enable clock before accessing register */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		writel_relaxed(sdesc->width,
			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		writel_relaxed(sdesc->addr >> 2,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
			   sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
static const struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};
static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};
static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");