/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
9 #include <linux/module.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/interrupt.h>
15 #include <linux/slab.h>
16 #include <linux/of_irq.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/of_platform.h>
20 #include <linux/clk.h>
21 #include <linux/of_dma.h>
22 #include <linux/sirfsoc_dma.h>
24 #include "dmaengine.h"
/* Controller hardware revisions */
#define SIRFSOC_DMA_VER_A7V1                    1
#define SIRFSOC_DMA_VER_A7V2                    2
#define SIRFSOC_DMA_VER_A6                      4

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16
#define SIRFSOC_DMA_TABLE_NUM                   256

/* Per-channel register offsets (v1/A6: relative to cid * 0x10) */
#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

/* Global register offsets (v1/A6) */
#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x154

/* Atlas7 (v2) register offsets */
#define SIRFSOC_DMA_WIDTH_ATLAS7                0x10
#define SIRFSOC_DMA_VALID_ATLAS7                0x14
#define SIRFSOC_DMA_INT_ATLAS7                  0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7               0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7            0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR               0x34
#define SIRFSOC_DMA_MUL_ATLAS7                  0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7         0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7     0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN                0x800
#define SIRFSOC_DMA_EARLY_RESP_SET              0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR              0x81C

/* Bit positions inside the per-channel control word */
#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7        2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7       3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7         4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7              7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7        5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7     25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT            32

/* Atlas7 interrupt status/enable bits */
#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7         BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7          BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7          BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7         BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7          BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7          BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7              0x3F

/* xlen and dma_width register is in 4 bytes boundary */
#define SIRFSOC_DMA_WORD_LEN                    4
#define SIRFSOC_DMA_XLEN_MAX_V1                 0x800
#define SIRFSOC_DMA_XLEN_MAX_V2                 0x1000
82 struct sirfsoc_dma_desc
{
83 struct dma_async_tx_descriptor desc
;
84 struct list_head node
;
86 /* SiRFprimaII 2D-DMA parameters */
88 int xlen
; /* DMA xlen */
89 int ylen
; /* DMA ylen */
90 int width
; /* DMA width */
92 bool cyclic
; /* is loop DMA? */
93 bool chain
; /* is chain DMA? */
94 u32 addr
; /* DMA buffer address */
95 u64 chain_table
[SIRFSOC_DMA_TABLE_NUM
]; /* chain tbl */
98 struct sirfsoc_dma_chan
{
100 struct list_head free
;
101 struct list_head prepared
;
102 struct list_head queued
;
103 struct list_head active
;
104 struct list_head completed
;
105 unsigned long happened_cyclic
;
106 unsigned long completed_cyclic
;
108 /* Lock for this structure */
114 struct sirfsoc_dma_regs
{
115 u32 ctrl
[SIRFSOC_DMA_CHANNELS
];
120 struct dma_device dma
;
121 struct tasklet_struct tasklet
;
122 struct sirfsoc_dma_chan channels
[SIRFSOC_DMA_CHANNELS
];
127 void (*exec_desc
)(struct sirfsoc_dma_desc
*sdesc
,
128 int cid
, int burst_mode
, void __iomem
*base
);
129 struct sirfsoc_dma_regs regs_save
;
132 struct sirfsoc_dmadata
{
133 void (*exec
)(struct sirfsoc_dma_desc
*sdesc
,
134 int cid
, int burst_mode
, void __iomem
*base
);
/* Flags stored in DMA v2 chain-table entries */
enum sirfsoc_dma_chain_flag {
	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
	SIRFSOC_DMA_CHAIN_END = 0x04
};
#define DRV_NAME	"sirfsoc_dma"

/* Forward declaration: remove() calls this before the PM ops are defined */
static int sirfsoc_dma_runtime_suspend(struct device *dev);
149 /* Convert struct dma_chan to struct sirfsoc_dma_chan */
151 struct sirfsoc_dma_chan
*dma_chan_to_sirfsoc_dma_chan(struct dma_chan
*c
)
153 return container_of(c
, struct sirfsoc_dma_chan
, chan
);
156 /* Convert struct dma_chan to struct sirfsoc_dma */
157 static inline struct sirfsoc_dma
*dma_chan_to_sirfsoc_dma(struct dma_chan
*c
)
159 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(c
);
160 return container_of(schan
, struct sirfsoc_dma
, channels
[c
->chan_id
]);
163 static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc
*sdesc
,
164 int cid
, int burst_mode
, void __iomem
*base
)
167 /* DMA v2 HW chain mode */
168 writel_relaxed((sdesc
->dir
<< SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7
) |
170 SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7
) |
171 (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7
) | 0x3,
172 base
+ SIRFSOC_DMA_CH_CTRL
);
174 /* DMA v2 legacy mode */
175 writel_relaxed(sdesc
->xlen
, base
+ SIRFSOC_DMA_CH_XLEN
);
176 writel_relaxed(sdesc
->ylen
, base
+ SIRFSOC_DMA_CH_YLEN
);
177 writel_relaxed(sdesc
->width
, base
+ SIRFSOC_DMA_WIDTH_ATLAS7
);
178 writel_relaxed((sdesc
->width
*((sdesc
->ylen
+1)>>1)),
179 base
+ SIRFSOC_DMA_MUL_ATLAS7
);
180 writel_relaxed((sdesc
->dir
<< SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7
) |
182 SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7
) |
183 0x3, base
+ SIRFSOC_DMA_CH_CTRL
);
185 writel_relaxed(sdesc
->chain
? SIRFSOC_DMA_INT_END_INT_ATLAS7
:
186 (SIRFSOC_DMA_INT_FINI_INT_ATLAS7
|
187 SIRFSOC_DMA_INT_LOOP_INT_ATLAS7
),
188 base
+ SIRFSOC_DMA_INT_EN_ATLAS7
);
189 writel(sdesc
->addr
, base
+ SIRFSOC_DMA_CH_ADDR
);
191 writel(0x10001, base
+ SIRFSOC_DMA_LOOP_CTRL_ATLAS7
);
194 static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc
*sdesc
,
195 int cid
, int burst_mode
, void __iomem
*base
)
197 writel_relaxed(1, base
+ SIRFSOC_DMA_IOBG_SCMD_EN
);
198 writel_relaxed((1 << cid
), base
+ SIRFSOC_DMA_EARLY_RESP_SET
);
199 writel_relaxed(sdesc
->width
, base
+ SIRFSOC_DMA_WIDTH_0
+ cid
* 4);
200 writel_relaxed(cid
| (burst_mode
<< SIRFSOC_DMA_MODE_CTRL_BIT
) |
201 (sdesc
->dir
<< SIRFSOC_DMA_DIR_CTRL_BIT
),
202 base
+ cid
* 0x10 + SIRFSOC_DMA_CH_CTRL
);
203 writel_relaxed(sdesc
->xlen
, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_XLEN
);
204 writel_relaxed(sdesc
->ylen
, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_YLEN
);
205 writel_relaxed(readl_relaxed(base
+ SIRFSOC_DMA_INT_EN
) |
206 (1 << cid
), base
+ SIRFSOC_DMA_INT_EN
);
207 writel(sdesc
->addr
>> 2, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_ADDR
);
209 writel((1 << cid
) | 1 << (cid
+ 16) |
210 readl_relaxed(base
+ SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7
),
211 base
+ SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7
);
216 static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc
*sdesc
,
217 int cid
, int burst_mode
, void __iomem
*base
)
219 writel_relaxed(sdesc
->width
, base
+ SIRFSOC_DMA_WIDTH_0
+ cid
* 4);
220 writel_relaxed(cid
| (burst_mode
<< SIRFSOC_DMA_MODE_CTRL_BIT
) |
221 (sdesc
->dir
<< SIRFSOC_DMA_DIR_CTRL_BIT
),
222 base
+ cid
* 0x10 + SIRFSOC_DMA_CH_CTRL
);
223 writel_relaxed(sdesc
->xlen
, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_XLEN
);
224 writel_relaxed(sdesc
->ylen
, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_YLEN
);
225 writel_relaxed(readl_relaxed(base
+ SIRFSOC_DMA_INT_EN
) |
226 (1 << cid
), base
+ SIRFSOC_DMA_INT_EN
);
227 writel(sdesc
->addr
>> 2, base
+ cid
* 0x10 + SIRFSOC_DMA_CH_ADDR
);
229 writel((1 << cid
) | 1 << (cid
+ 16) |
230 readl_relaxed(base
+ SIRFSOC_DMA_CH_LOOP_CTRL
),
231 base
+ SIRFSOC_DMA_CH_LOOP_CTRL
);
236 /* Execute all queued DMA descriptors */
237 static void sirfsoc_dma_execute(struct sirfsoc_dma_chan
*schan
)
239 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(&schan
->chan
);
240 int cid
= schan
->chan
.chan_id
;
241 struct sirfsoc_dma_desc
*sdesc
= NULL
;
245 * lock has been held by functions calling this, so we don't hold
249 sdesc
= list_first_entry(&schan
->queued
, struct sirfsoc_dma_desc
,
251 /* Move the first queued descriptor to active list */
252 list_move_tail(&sdesc
->node
, &schan
->active
);
254 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
)
257 /* Start the DMA transfer */
258 sdma
->exec_desc(sdesc
, cid
, schan
->mode
, base
);
261 schan
->happened_cyclic
= schan
->completed_cyclic
= 0;
264 /* Interrupt handler */
265 static irqreturn_t
sirfsoc_dma_irq(int irq
, void *data
)
267 struct sirfsoc_dma
*sdma
= data
;
268 struct sirfsoc_dma_chan
*schan
;
269 struct sirfsoc_dma_desc
*sdesc
= NULL
;
275 switch (sdma
->type
) {
276 case SIRFSOC_DMA_VER_A6
:
277 case SIRFSOC_DMA_VER_A7V1
:
278 is
= readl(sdma
->base
+ SIRFSOC_DMA_CH_INT
);
279 reg
= sdma
->base
+ SIRFSOC_DMA_CH_INT
;
280 while ((ch
= fls(is
) - 1) >= 0) {
282 writel_relaxed(1 << ch
, reg
);
283 schan
= &sdma
->channels
[ch
];
284 spin_lock(&schan
->lock
);
285 sdesc
= list_first_entry(&schan
->active
,
286 struct sirfsoc_dma_desc
, node
);
287 if (!sdesc
->cyclic
) {
288 /* Execute queued descriptors */
289 list_splice_tail_init(&schan
->active
,
291 dma_cookie_complete(&sdesc
->desc
);
292 if (!list_empty(&schan
->queued
))
293 sirfsoc_dma_execute(schan
);
295 schan
->happened_cyclic
++;
296 spin_unlock(&schan
->lock
);
300 case SIRFSOC_DMA_VER_A7V2
:
301 is
= readl(sdma
->base
+ SIRFSOC_DMA_INT_ATLAS7
);
303 reg
= sdma
->base
+ SIRFSOC_DMA_INT_ATLAS7
;
304 writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7
, reg
);
305 schan
= &sdma
->channels
[0];
306 spin_lock(&schan
->lock
);
307 sdesc
= list_first_entry(&schan
->active
,
308 struct sirfsoc_dma_desc
, node
);
309 if (!sdesc
->cyclic
) {
310 chain
= sdesc
->chain
;
311 if ((chain
&& (is
& SIRFSOC_DMA_INT_END_INT_ATLAS7
)) ||
313 (is
& SIRFSOC_DMA_INT_FINI_INT_ATLAS7
))) {
314 /* Execute queued descriptors */
315 list_splice_tail_init(&schan
->active
,
317 dma_cookie_complete(&sdesc
->desc
);
318 if (!list_empty(&schan
->queued
))
319 sirfsoc_dma_execute(schan
);
321 } else if (sdesc
->cyclic
&& (is
&
322 SIRFSOC_DMA_INT_LOOP_INT_ATLAS7
))
323 schan
->happened_cyclic
++;
325 spin_unlock(&schan
->lock
);
332 /* Schedule tasklet */
333 tasklet_schedule(&sdma
->tasklet
);
338 /* process completed descriptors */
339 static void sirfsoc_dma_process_completed(struct sirfsoc_dma
*sdma
)
341 dma_cookie_t last_cookie
= 0;
342 struct sirfsoc_dma_chan
*schan
;
343 struct sirfsoc_dma_desc
*sdesc
;
344 struct dma_async_tx_descriptor
*desc
;
346 unsigned long happened_cyclic
;
350 for (i
= 0; i
< sdma
->dma
.chancnt
; i
++) {
351 schan
= &sdma
->channels
[i
];
353 /* Get all completed descriptors */
354 spin_lock_irqsave(&schan
->lock
, flags
);
355 if (!list_empty(&schan
->completed
)) {
356 list_splice_tail_init(&schan
->completed
, &list
);
357 spin_unlock_irqrestore(&schan
->lock
, flags
);
359 /* Execute callbacks and run dependencies */
360 list_for_each_entry(sdesc
, &list
, node
) {
363 dmaengine_desc_get_callback_invoke(desc
, NULL
);
364 last_cookie
= desc
->cookie
;
365 dma_run_dependencies(desc
);
368 /* Free descriptors */
369 spin_lock_irqsave(&schan
->lock
, flags
);
370 list_splice_tail_init(&list
, &schan
->free
);
371 schan
->chan
.completed_cookie
= last_cookie
;
372 spin_unlock_irqrestore(&schan
->lock
, flags
);
374 if (list_empty(&schan
->active
)) {
375 spin_unlock_irqrestore(&schan
->lock
, flags
);
379 /* for cyclic channel, desc is always in active list */
380 sdesc
= list_first_entry(&schan
->active
,
381 struct sirfsoc_dma_desc
, node
);
384 happened_cyclic
= schan
->happened_cyclic
;
385 spin_unlock_irqrestore(&schan
->lock
, flags
);
388 while (happened_cyclic
!= schan
->completed_cyclic
) {
389 dmaengine_desc_get_callback_invoke(desc
, NULL
);
390 schan
->completed_cyclic
++;
/* Bottom half: run completion callbacks outside IRQ context */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}
404 /* Submit descriptor to hardware */
405 static dma_cookie_t
sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor
*txd
)
407 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(txd
->chan
);
408 struct sirfsoc_dma_desc
*sdesc
;
412 sdesc
= container_of(txd
, struct sirfsoc_dma_desc
, desc
);
414 spin_lock_irqsave(&schan
->lock
, flags
);
416 /* Move descriptor to queue */
417 list_move_tail(&sdesc
->node
, &schan
->queued
);
419 cookie
= dma_cookie_assign(txd
);
421 spin_unlock_irqrestore(&schan
->lock
, flags
);
426 static int sirfsoc_dma_slave_config(struct dma_chan
*chan
,
427 struct dma_slave_config
*config
)
429 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
432 if ((config
->src_addr_width
!= DMA_SLAVE_BUSWIDTH_4_BYTES
) ||
433 (config
->dst_addr_width
!= DMA_SLAVE_BUSWIDTH_4_BYTES
))
436 spin_lock_irqsave(&schan
->lock
, flags
);
437 schan
->mode
= (config
->src_maxburst
== 4 ? 1 : 0);
438 spin_unlock_irqrestore(&schan
->lock
, flags
);
443 static int sirfsoc_dma_terminate_all(struct dma_chan
*chan
)
445 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
446 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(&schan
->chan
);
447 int cid
= schan
->chan
.chan_id
;
450 spin_lock_irqsave(&schan
->lock
, flags
);
452 switch (sdma
->type
) {
453 case SIRFSOC_DMA_VER_A7V1
:
454 writel_relaxed(1 << cid
, sdma
->base
+ SIRFSOC_DMA_INT_EN_CLR
);
455 writel_relaxed(1 << cid
, sdma
->base
+ SIRFSOC_DMA_CH_INT
);
456 writel_relaxed((1 << cid
) | 1 << (cid
+ 16),
458 SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7
);
459 writel_relaxed(1 << cid
, sdma
->base
+ SIRFSOC_DMA_CH_VALID
);
461 case SIRFSOC_DMA_VER_A7V2
:
462 writel_relaxed(0, sdma
->base
+ SIRFSOC_DMA_INT_EN_ATLAS7
);
463 writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7
,
464 sdma
->base
+ SIRFSOC_DMA_INT_ATLAS7
);
465 writel_relaxed(0, sdma
->base
+ SIRFSOC_DMA_LOOP_CTRL_ATLAS7
);
466 writel_relaxed(0, sdma
->base
+ SIRFSOC_DMA_VALID_ATLAS7
);
468 case SIRFSOC_DMA_VER_A6
:
469 writel_relaxed(readl_relaxed(sdma
->base
+ SIRFSOC_DMA_INT_EN
) &
470 ~(1 << cid
), sdma
->base
+ SIRFSOC_DMA_INT_EN
);
471 writel_relaxed(readl_relaxed(sdma
->base
+
472 SIRFSOC_DMA_CH_LOOP_CTRL
) &
473 ~((1 << cid
) | 1 << (cid
+ 16)),
474 sdma
->base
+ SIRFSOC_DMA_CH_LOOP_CTRL
);
475 writel_relaxed(1 << cid
, sdma
->base
+ SIRFSOC_DMA_CH_VALID
);
481 list_splice_tail_init(&schan
->active
, &schan
->free
);
482 list_splice_tail_init(&schan
->queued
, &schan
->free
);
484 spin_unlock_irqrestore(&schan
->lock
, flags
);
489 static int sirfsoc_dma_pause_chan(struct dma_chan
*chan
)
491 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
492 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(&schan
->chan
);
493 int cid
= schan
->chan
.chan_id
;
496 spin_lock_irqsave(&schan
->lock
, flags
);
498 switch (sdma
->type
) {
499 case SIRFSOC_DMA_VER_A7V1
:
500 writel_relaxed((1 << cid
) | 1 << (cid
+ 16),
502 SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7
);
504 case SIRFSOC_DMA_VER_A7V2
:
505 writel_relaxed(0, sdma
->base
+ SIRFSOC_DMA_LOOP_CTRL_ATLAS7
);
507 case SIRFSOC_DMA_VER_A6
:
508 writel_relaxed(readl_relaxed(sdma
->base
+
509 SIRFSOC_DMA_CH_LOOP_CTRL
) &
510 ~((1 << cid
) | 1 << (cid
+ 16)),
511 sdma
->base
+ SIRFSOC_DMA_CH_LOOP_CTRL
);
518 spin_unlock_irqrestore(&schan
->lock
, flags
);
523 static int sirfsoc_dma_resume_chan(struct dma_chan
*chan
)
525 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
526 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(&schan
->chan
);
527 int cid
= schan
->chan
.chan_id
;
530 spin_lock_irqsave(&schan
->lock
, flags
);
531 switch (sdma
->type
) {
532 case SIRFSOC_DMA_VER_A7V1
:
533 writel_relaxed((1 << cid
) | 1 << (cid
+ 16),
534 sdma
->base
+ SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7
);
536 case SIRFSOC_DMA_VER_A7V2
:
537 writel_relaxed(0x10001,
538 sdma
->base
+ SIRFSOC_DMA_LOOP_CTRL_ATLAS7
);
540 case SIRFSOC_DMA_VER_A6
:
541 writel_relaxed(readl_relaxed(sdma
->base
+
542 SIRFSOC_DMA_CH_LOOP_CTRL
) |
543 ((1 << cid
) | 1 << (cid
+ 16)),
544 sdma
->base
+ SIRFSOC_DMA_CH_LOOP_CTRL
);
551 spin_unlock_irqrestore(&schan
->lock
, flags
);
556 /* Alloc channel resources */
557 static int sirfsoc_dma_alloc_chan_resources(struct dma_chan
*chan
)
559 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(chan
);
560 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
561 struct sirfsoc_dma_desc
*sdesc
;
566 pm_runtime_get_sync(sdma
->dma
.dev
);
568 /* Alloc descriptors for this channel */
569 for (i
= 0; i
< SIRFSOC_DMA_DESCRIPTORS
; i
++) {
570 sdesc
= kzalloc(sizeof(*sdesc
), GFP_KERNEL
);
572 dev_notice(sdma
->dma
.dev
, "Memory allocation error. "
573 "Allocated only %u descriptors\n", i
);
577 dma_async_tx_descriptor_init(&sdesc
->desc
, chan
);
578 sdesc
->desc
.flags
= DMA_CTRL_ACK
;
579 sdesc
->desc
.tx_submit
= sirfsoc_dma_tx_submit
;
581 list_add_tail(&sdesc
->node
, &descs
);
584 /* Return error only if no descriptors were allocated */
588 spin_lock_irqsave(&schan
->lock
, flags
);
590 list_splice_tail_init(&descs
, &schan
->free
);
591 spin_unlock_irqrestore(&schan
->lock
, flags
);
596 /* Free channel resources */
597 static void sirfsoc_dma_free_chan_resources(struct dma_chan
*chan
)
599 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
600 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(chan
);
601 struct sirfsoc_dma_desc
*sdesc
, *tmp
;
605 spin_lock_irqsave(&schan
->lock
, flags
);
607 /* Channel must be idle */
608 BUG_ON(!list_empty(&schan
->prepared
));
609 BUG_ON(!list_empty(&schan
->queued
));
610 BUG_ON(!list_empty(&schan
->active
));
611 BUG_ON(!list_empty(&schan
->completed
));
614 list_splice_tail_init(&schan
->free
, &descs
);
616 spin_unlock_irqrestore(&schan
->lock
, flags
);
618 /* Free descriptors */
619 list_for_each_entry_safe(sdesc
, tmp
, &descs
, node
)
622 pm_runtime_put(sdma
->dma
.dev
);
625 /* Send pending descriptor to hardware */
626 static void sirfsoc_dma_issue_pending(struct dma_chan
*chan
)
628 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
631 spin_lock_irqsave(&schan
->lock
, flags
);
633 if (list_empty(&schan
->active
) && !list_empty(&schan
->queued
))
634 sirfsoc_dma_execute(schan
);
636 spin_unlock_irqrestore(&schan
->lock
, flags
);
639 /* Check request completion status */
640 static enum dma_status
641 sirfsoc_dma_tx_status(struct dma_chan
*chan
, dma_cookie_t cookie
,
642 struct dma_tx_state
*txstate
)
644 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(chan
);
645 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
648 struct sirfsoc_dma_desc
*sdesc
;
649 int cid
= schan
->chan
.chan_id
;
650 unsigned long dma_pos
;
651 unsigned long dma_request_bytes
;
652 unsigned long residue
;
654 spin_lock_irqsave(&schan
->lock
, flags
);
656 if (list_empty(&schan
->active
)) {
657 ret
= dma_cookie_status(chan
, cookie
, txstate
);
658 dma_set_residue(txstate
, 0);
659 spin_unlock_irqrestore(&schan
->lock
, flags
);
662 sdesc
= list_first_entry(&schan
->active
, struct sirfsoc_dma_desc
, node
);
664 dma_request_bytes
= (sdesc
->xlen
+ 1) * (sdesc
->ylen
+ 1) *
665 (sdesc
->width
* SIRFSOC_DMA_WORD_LEN
);
667 dma_request_bytes
= sdesc
->xlen
* SIRFSOC_DMA_WORD_LEN
;
669 ret
= dma_cookie_status(chan
, cookie
, txstate
);
671 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
)
674 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
) {
675 dma_pos
= readl_relaxed(sdma
->base
+ SIRFSOC_DMA_CUR_DATA_ADDR
);
677 dma_pos
= readl_relaxed(
678 sdma
->base
+ cid
* 0x10 + SIRFSOC_DMA_CH_ADDR
) << 2;
681 residue
= dma_request_bytes
- (dma_pos
- sdesc
->addr
);
682 dma_set_residue(txstate
, residue
);
684 spin_unlock_irqrestore(&schan
->lock
, flags
);
689 static struct dma_async_tx_descriptor
*sirfsoc_dma_prep_interleaved(
690 struct dma_chan
*chan
, struct dma_interleaved_template
*xt
,
693 struct sirfsoc_dma
*sdma
= dma_chan_to_sirfsoc_dma(chan
);
694 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
695 struct sirfsoc_dma_desc
*sdesc
= NULL
;
696 unsigned long iflags
;
699 if ((xt
->dir
!= DMA_MEM_TO_DEV
) && (xt
->dir
!= DMA_DEV_TO_MEM
)) {
704 /* Get free descriptor */
705 spin_lock_irqsave(&schan
->lock
, iflags
);
706 if (!list_empty(&schan
->free
)) {
707 sdesc
= list_first_entry(&schan
->free
, struct sirfsoc_dma_desc
,
709 list_del(&sdesc
->node
);
711 spin_unlock_irqrestore(&schan
->lock
, iflags
);
714 /* try to free completed descriptors */
715 sirfsoc_dma_process_completed(sdma
);
720 /* Place descriptor in prepared list */
721 spin_lock_irqsave(&schan
->lock
, iflags
);
724 * Number of chunks in a frame can only be 1 for prima2
725 * and ylen (number of frame - 1) must be at least 0
727 if ((xt
->frame_size
== 1) && (xt
->numf
> 0)) {
729 sdesc
->xlen
= xt
->sgl
[0].size
/ SIRFSOC_DMA_WORD_LEN
;
730 sdesc
->width
= (xt
->sgl
[0].size
+ xt
->sgl
[0].icg
) /
731 SIRFSOC_DMA_WORD_LEN
;
732 sdesc
->ylen
= xt
->numf
- 1;
733 if (xt
->dir
== DMA_MEM_TO_DEV
) {
734 sdesc
->addr
= xt
->src_start
;
737 sdesc
->addr
= xt
->dst_start
;
741 list_add_tail(&sdesc
->node
, &schan
->prepared
);
743 pr_err("sirfsoc DMA Invalid xfer\n");
747 spin_unlock_irqrestore(&schan
->lock
, iflags
);
751 spin_unlock_irqrestore(&schan
->lock
, iflags
);
757 static struct dma_async_tx_descriptor
*
758 sirfsoc_dma_prep_cyclic(struct dma_chan
*chan
, dma_addr_t addr
,
759 size_t buf_len
, size_t period_len
,
760 enum dma_transfer_direction direction
, unsigned long flags
)
762 struct sirfsoc_dma_chan
*schan
= dma_chan_to_sirfsoc_dma_chan(chan
);
763 struct sirfsoc_dma_desc
*sdesc
= NULL
;
764 unsigned long iflags
;
767 * we only support cycle transfer with 2 period
768 * If the X-length is set to 0, it would be the loop mode.
769 * The DMA address keeps increasing until reaching the end of a loop
770 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
771 * the DMA address goes back to the beginning of this area.
772 * In loop mode, the DMA data region is divided into two parts, BUFA
773 * and BUFB. DMA controller generates interrupts twice in each loop:
774 * when the DMA address reaches the end of BUFA or the end of the
777 if (buf_len
!= 2 * period_len
)
778 return ERR_PTR(-EINVAL
);
780 /* Get free descriptor */
781 spin_lock_irqsave(&schan
->lock
, iflags
);
782 if (!list_empty(&schan
->free
)) {
783 sdesc
= list_first_entry(&schan
->free
, struct sirfsoc_dma_desc
,
785 list_del(&sdesc
->node
);
787 spin_unlock_irqrestore(&schan
->lock
, iflags
);
792 /* Place descriptor in prepared list */
793 spin_lock_irqsave(&schan
->lock
, iflags
);
797 sdesc
->ylen
= buf_len
/ SIRFSOC_DMA_WORD_LEN
- 1;
799 list_add_tail(&sdesc
->node
, &schan
->prepared
);
800 spin_unlock_irqrestore(&schan
->lock
, iflags
);
806 * The DMA controller consists of 16 independent DMA channels.
807 * Each channel is allocated to a different function
809 bool sirfsoc_dma_filter_id(struct dma_chan
*chan
, void *chan_id
)
811 unsigned int ch_nr
= (unsigned int) chan_id
;
813 if (ch_nr
== chan
->chan_id
+
814 chan
->device
->dev_id
* SIRFSOC_DMA_CHANNELS
)
819 EXPORT_SYMBOL(sirfsoc_dma_filter_id
);
/* Bus widths advertised to the dmaengine core */
#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
828 static struct dma_chan
*of_dma_sirfsoc_xlate(struct of_phandle_args
*dma_spec
,
829 struct of_dma
*ofdma
)
831 struct sirfsoc_dma
*sdma
= ofdma
->of_dma_data
;
832 unsigned int request
= dma_spec
->args
[0];
834 if (request
>= SIRFSOC_DMA_CHANNELS
)
837 return dma_get_slave_channel(&sdma
->channels
[request
].chan
);
840 static int sirfsoc_dma_probe(struct platform_device
*op
)
842 struct device_node
*dn
= op
->dev
.of_node
;
843 struct device
*dev
= &op
->dev
;
844 struct dma_device
*dma
;
845 struct sirfsoc_dma
*sdma
;
846 struct sirfsoc_dma_chan
*schan
;
847 struct sirfsoc_dmadata
*data
;
849 ulong regs_start
, regs_size
;
853 sdma
= devm_kzalloc(dev
, sizeof(*sdma
), GFP_KERNEL
);
857 data
= (struct sirfsoc_dmadata
*)
858 (of_match_device(op
->dev
.driver
->of_match_table
,
860 sdma
->exec_desc
= data
->exec
;
861 sdma
->type
= data
->type
;
863 if (of_property_read_u32(dn
, "cell-index", &id
)) {
864 dev_err(dev
, "Fail to get DMAC index\n");
868 sdma
->irq
= irq_of_parse_and_map(dn
, 0);
870 dev_err(dev
, "Error mapping IRQ!\n");
874 sdma
->clk
= devm_clk_get(dev
, NULL
);
875 if (IS_ERR(sdma
->clk
)) {
876 dev_err(dev
, "failed to get a clock.\n");
877 return PTR_ERR(sdma
->clk
);
880 ret
= of_address_to_resource(dn
, 0, &res
);
882 dev_err(dev
, "Error parsing memory region!\n");
886 regs_start
= res
.start
;
887 regs_size
= resource_size(&res
);
889 sdma
->base
= devm_ioremap(dev
, regs_start
, regs_size
);
891 dev_err(dev
, "Error mapping memory region!\n");
896 ret
= request_irq(sdma
->irq
, &sirfsoc_dma_irq
, 0, DRV_NAME
, sdma
);
898 dev_err(dev
, "Error requesting IRQ!\n");
906 dma
->device_alloc_chan_resources
= sirfsoc_dma_alloc_chan_resources
;
907 dma
->device_free_chan_resources
= sirfsoc_dma_free_chan_resources
;
908 dma
->device_issue_pending
= sirfsoc_dma_issue_pending
;
909 dma
->device_config
= sirfsoc_dma_slave_config
;
910 dma
->device_pause
= sirfsoc_dma_pause_chan
;
911 dma
->device_resume
= sirfsoc_dma_resume_chan
;
912 dma
->device_terminate_all
= sirfsoc_dma_terminate_all
;
913 dma
->device_tx_status
= sirfsoc_dma_tx_status
;
914 dma
->device_prep_interleaved_dma
= sirfsoc_dma_prep_interleaved
;
915 dma
->device_prep_dma_cyclic
= sirfsoc_dma_prep_cyclic
;
916 dma
->src_addr_widths
= SIRFSOC_DMA_BUSWIDTHS
;
917 dma
->dst_addr_widths
= SIRFSOC_DMA_BUSWIDTHS
;
918 dma
->directions
= BIT(DMA_DEV_TO_MEM
) | BIT(DMA_MEM_TO_DEV
);
920 INIT_LIST_HEAD(&dma
->channels
);
921 dma_cap_set(DMA_SLAVE
, dma
->cap_mask
);
922 dma_cap_set(DMA_CYCLIC
, dma
->cap_mask
);
923 dma_cap_set(DMA_INTERLEAVE
, dma
->cap_mask
);
924 dma_cap_set(DMA_PRIVATE
, dma
->cap_mask
);
926 for (i
= 0; i
< SIRFSOC_DMA_CHANNELS
; i
++) {
927 schan
= &sdma
->channels
[i
];
929 schan
->chan
.device
= dma
;
930 dma_cookie_init(&schan
->chan
);
932 INIT_LIST_HEAD(&schan
->free
);
933 INIT_LIST_HEAD(&schan
->prepared
);
934 INIT_LIST_HEAD(&schan
->queued
);
935 INIT_LIST_HEAD(&schan
->active
);
936 INIT_LIST_HEAD(&schan
->completed
);
938 spin_lock_init(&schan
->lock
);
939 list_add_tail(&schan
->chan
.device_node
, &dma
->channels
);
942 tasklet_init(&sdma
->tasklet
, sirfsoc_dma_tasklet
, (unsigned long)sdma
);
944 /* Register DMA engine */
945 dev_set_drvdata(dev
, sdma
);
947 ret
= dma_async_device_register(dma
);
951 /* Device-tree DMA controller registration */
952 ret
= of_dma_controller_register(dn
, of_dma_sirfsoc_xlate
, sdma
);
954 dev_err(dev
, "failed to register DMA controller\n");
958 pm_runtime_enable(&op
->dev
);
959 dev_info(dev
, "initialized SIRFSOC DMAC driver\n");
964 dma_async_device_unregister(dma
);
966 free_irq(sdma
->irq
, sdma
);
968 irq_dispose_mapping(sdma
->irq
);
972 static int sirfsoc_dma_remove(struct platform_device
*op
)
974 struct device
*dev
= &op
->dev
;
975 struct sirfsoc_dma
*sdma
= dev_get_drvdata(dev
);
977 of_dma_controller_free(op
->dev
.of_node
);
978 dma_async_device_unregister(&sdma
->dma
);
979 free_irq(sdma
->irq
, sdma
);
980 tasklet_kill(&sdma
->tasklet
);
981 irq_dispose_mapping(sdma
->irq
);
982 pm_runtime_disable(&op
->dev
);
983 if (!pm_runtime_status_suspended(&op
->dev
))
984 sirfsoc_dma_runtime_suspend(&op
->dev
);
989 static int __maybe_unused
sirfsoc_dma_runtime_suspend(struct device
*dev
)
991 struct sirfsoc_dma
*sdma
= dev_get_drvdata(dev
);
993 clk_disable_unprepare(sdma
->clk
);
997 static int __maybe_unused
sirfsoc_dma_runtime_resume(struct device
*dev
)
999 struct sirfsoc_dma
*sdma
= dev_get_drvdata(dev
);
1002 ret
= clk_prepare_enable(sdma
->clk
);
1004 dev_err(dev
, "clk_enable failed: %d\n", ret
);
1010 static int __maybe_unused
sirfsoc_dma_pm_suspend(struct device
*dev
)
1012 struct sirfsoc_dma
*sdma
= dev_get_drvdata(dev
);
1013 struct sirfsoc_dma_regs
*save
= &sdma
->regs_save
;
1014 struct sirfsoc_dma_chan
*schan
;
1021 * if we were runtime-suspended before, resume to enable clock
1022 * before accessing register
1024 if (pm_runtime_status_suspended(dev
)) {
1025 ret
= sirfsoc_dma_runtime_resume(dev
);
1030 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
) {
1032 int_offset
= SIRFSOC_DMA_INT_EN_ATLAS7
;
1034 count
= SIRFSOC_DMA_CHANNELS
;
1035 int_offset
= SIRFSOC_DMA_INT_EN
;
1039 * DMA controller will lose all registers while suspending
1040 * so we need to save registers for active channels
1042 for (ch
= 0; ch
< count
; ch
++) {
1043 schan
= &sdma
->channels
[ch
];
1044 if (list_empty(&schan
->active
))
1046 save
->ctrl
[ch
] = readl_relaxed(sdma
->base
+
1047 ch
* 0x10 + SIRFSOC_DMA_CH_CTRL
);
1049 save
->interrupt_en
= readl_relaxed(sdma
->base
+ int_offset
);
1052 sirfsoc_dma_runtime_suspend(dev
);
1057 static int __maybe_unused
sirfsoc_dma_pm_resume(struct device
*dev
)
1059 struct sirfsoc_dma
*sdma
= dev_get_drvdata(dev
);
1060 struct sirfsoc_dma_regs
*save
= &sdma
->regs_save
;
1061 struct sirfsoc_dma_desc
*sdesc
;
1062 struct sirfsoc_dma_chan
*schan
;
1069 /* Enable clock before accessing register */
1070 ret
= sirfsoc_dma_runtime_resume(dev
);
1074 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
) {
1076 int_offset
= SIRFSOC_DMA_INT_EN_ATLAS7
;
1077 width_offset
= SIRFSOC_DMA_WIDTH_ATLAS7
;
1079 count
= SIRFSOC_DMA_CHANNELS
;
1080 int_offset
= SIRFSOC_DMA_INT_EN
;
1081 width_offset
= SIRFSOC_DMA_WIDTH_0
;
1084 writel_relaxed(save
->interrupt_en
, sdma
->base
+ int_offset
);
1085 for (ch
= 0; ch
< count
; ch
++) {
1086 schan
= &sdma
->channels
[ch
];
1087 if (list_empty(&schan
->active
))
1089 sdesc
= list_first_entry(&schan
->active
,
1090 struct sirfsoc_dma_desc
,
1092 writel_relaxed(sdesc
->width
,
1093 sdma
->base
+ width_offset
+ ch
* 4);
1094 writel_relaxed(sdesc
->xlen
,
1095 sdma
->base
+ ch
* 0x10 + SIRFSOC_DMA_CH_XLEN
);
1096 writel_relaxed(sdesc
->ylen
,
1097 sdma
->base
+ ch
* 0x10 + SIRFSOC_DMA_CH_YLEN
);
1098 writel_relaxed(save
->ctrl
[ch
],
1099 sdma
->base
+ ch
* 0x10 + SIRFSOC_DMA_CH_CTRL
);
1100 if (sdma
->type
== SIRFSOC_DMA_VER_A7V2
) {
1101 writel_relaxed(sdesc
->addr
,
1102 sdma
->base
+ SIRFSOC_DMA_CH_ADDR
);
1104 writel_relaxed(sdesc
->addr
>> 2,
1105 sdma
->base
+ ch
* 0x10 + SIRFSOC_DMA_CH_ADDR
);
1110 /* if we were runtime-suspended before, suspend again */
1111 if (pm_runtime_status_suspended(dev
))
1112 sirfsoc_dma_runtime_suspend(dev
);
1117 static const struct dev_pm_ops sirfsoc_dma_pm_ops
= {
1118 SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend
, sirfsoc_dma_runtime_resume
, NULL
)
1119 SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend
, sirfsoc_dma_pm_resume
)
1122 static struct sirfsoc_dmadata sirfsoc_dmadata_a6
= {
1123 .exec
= sirfsoc_dma_execute_hw_a6
,
1124 .type
= SIRFSOC_DMA_VER_A6
,
1127 static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1
= {
1128 .exec
= sirfsoc_dma_execute_hw_a7v1
,
1129 .type
= SIRFSOC_DMA_VER_A7V1
,
1132 static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2
= {
1133 .exec
= sirfsoc_dma_execute_hw_a7v2
,
1134 .type
= SIRFSOC_DMA_VER_A7V2
,
1137 static const struct of_device_id sirfsoc_dma_match
[] = {
1138 { .compatible
= "sirf,prima2-dmac", .data
= &sirfsoc_dmadata_a6
,},
1139 { .compatible
= "sirf,atlas7-dmac", .data
= &sirfsoc_dmadata_a7v1
,},
1140 { .compatible
= "sirf,atlas7-dmac-v2", .data
= &sirfsoc_dmadata_a7v2
,},
1143 MODULE_DEVICE_TABLE(of
, sirfsoc_dma_match
);
1145 static struct platform_driver sirfsoc_dma_driver
= {
1146 .probe
= sirfsoc_dma_probe
,
1147 .remove
= sirfsoc_dma_remove
,
1150 .pm
= &sirfsoc_dma_pm_ops
,
1151 .of_match_table
= sirfsoc_dma_match
,
1155 static __init
int sirfsoc_dma_init(void)
1157 return platform_driver_register(&sirfsoc_dma_driver
);
1160 static void __exit
sirfsoc_dma_exit(void)
1162 platform_driver_unregister(&sirfsoc_dma_driver
);
1165 subsys_initcall(sirfsoc_dma_init
);
1166 module_exit(sirfsoc_dma_exit
);
1168 MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
1169 MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
1170 MODULE_DESCRIPTION("SIRFSOC DMA control driver");
1171 MODULE_LICENSE("GPL v2");