// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_VER_A7V1			1
#define SIRFSOC_DMA_VER_A7V2			2
#define SIRFSOC_DMA_VER_A6			4

#define SIRFSOC_DMA_DESCRIPTORS			16
#define SIRFSOC_DMA_CHANNELS			16
#define SIRFSOC_DMA_TABLE_NUM			256

#define SIRFSOC_DMA_CH_ADDR			0x00
#define SIRFSOC_DMA_CH_XLEN			0x04
#define SIRFSOC_DMA_CH_YLEN			0x08
#define SIRFSOC_DMA_CH_CTRL			0x0C

#define SIRFSOC_DMA_WIDTH_0			0x100
#define SIRFSOC_DMA_CH_VALID			0x140
#define SIRFSOC_DMA_CH_INT			0x144
#define SIRFSOC_DMA_INT_EN			0x148
#define SIRFSOC_DMA_INT_EN_CLR			0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL		0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR		0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7		0x10
#define SIRFSOC_DMA_VALID_ATLAS7		0x14
#define SIRFSOC_DMA_INT_ATLAS7			0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7		0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7		0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR		0x34
#define SIRFSOC_DMA_MUL_ATLAS7			0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7		0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7	0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN		0x800
#define SIRFSOC_DMA_EARLY_RESP_SET		0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR		0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT		4
#define SIRFSOC_DMA_DIR_CTRL_BIT		5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7	2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7	3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7		4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7		7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7	5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7	25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT		32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7		BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7		BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7		BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7		BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7		BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7		BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7		0x3F

/* the xlen and dma_width registers are on a 4-byte boundary */
#define SIRFSOC_DMA_WORD_LEN			4
#define SIRFSOC_DMA_XLEN_MAX_V1			0x800
#define SIRFSOC_DMA_XLEN_MAX_V2			0x1000

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	bool		chain;		/* is chain DMA? */
	u32		addr;		/* DMA buffer address */
	u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	int				type;
	void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
			  int cid, int burst_mode, void __iomem *base);
	struct sirfsoc_dma_regs		regs_save;
};

struct sirfsoc_dmadata {
	void (*exec)(struct sirfsoc_dma_desc *sdesc,
		     int cid, int burst_mode, void __iomem *base);
	int type;
};

enum sirfsoc_dma_chain_flag {
	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
	SIRFSOC_DMA_CHAIN_END = 0x04
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
					int cid, int burst_mode,
					void __iomem *base)
{
	if (sdesc->chain) {
		/* DMA v2 HW chain mode */
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
			       base + SIRFSOC_DMA_CH_CTRL);
	} else {
		/* DMA v2 legacy mode */
		writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
		writel_relaxed((sdesc->width * ((sdesc->ylen + 1) >> 1)),
			       base + SIRFSOC_DMA_MUL_ATLAS7);
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       0x3, base + SIRFSOC_DMA_CH_CTRL);
	}
	writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
		       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
			SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
		       base + SIRFSOC_DMA_INT_EN_ATLAS7);
	writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic)
		writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
					int cid, int burst_mode,
					void __iomem *base)
{
	writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
	writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
	}
}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
				      int cid, int burst_mode,
				      void __iomem *base)
{
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL);
	}
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;
	void __iomem *base;

	/*
	 * The spinlock is already held by the callers of this function,
	 * so we don't take it again here.
	 */
	base = sdma->base;
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
				 node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	/* Start the DMA transfer */
	sdma->exec_desc(sdesc, cid, schan->mode, base);

	if (sdesc->cyclic)
		schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	bool chain;
	int ch;
	void __iomem *reg;

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A6:
	case SIRFSOC_DMA_VER_A7V1:
		is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
		reg = sdma->base + SIRFSOC_DMA_CH_INT;
		while ((ch = fls(is) - 1) >= 0) {
			is &= ~(1 << ch);
			writel_relaxed(1 << ch, reg);
			schan = &sdma->channels[ch];
			spin_lock(&schan->lock);
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc,
						 node);
			if (!sdesc->cyclic) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			} else
				schan->happened_cyclic++;
			spin_unlock(&schan->lock);
		}
		break;

	case SIRFSOC_DMA_VER_A7V2:
		is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

		reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
		schan = &sdma->channels[0];
		spin_lock(&schan->lock);
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			chain = sdesc->chain;
			if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
			    (!chain &&
			     (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			}
		} else if (sdesc->cyclic && (is &
					SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
		break;

	default:
		break;
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				dmaengine_desc_get_callback_invoke(desc, NULL);
				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			if (list_empty(&schan->active)) {
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc,
						 node);

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				dmaengine_desc_get_callback_invoke(desc, NULL);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *config)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

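/*
 * Illustrative sketch (not part of the original driver): a client would
 * configure the channel before preparing transfers. Only a 4-byte bus
 * width is accepted above, and src_maxburst == 4 selects burst mode.
 * The "chan" pointer below stands for a hypothetical, already-requested
 * channel.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		return -EINVAL;		(any other bus width is rejected)
 */
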
static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	default:
		break;
	}

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);
	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0x10001,
			       sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) |
			       ((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. Allocated only %u descriptors\n",
				   i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		/* balance the pm_runtime_get_sync() above */
		pm_runtime_put(sdma->dma.dev);
		return -ENOMEM;
	}

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		      struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active)) {
		ret = dma_cookie_status(chan, cookie, txstate);
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&schan->lock, flags);
		return ret;
	}
	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
	if (sdesc->cyclic)
		dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
			(sdesc->width * SIRFSOC_DMA_WORD_LEN);
	else
		dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
	} else {
		dma_pos = readl_relaxed(
			sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
	}

	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

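/*
 * Illustrative sketch (not from this driver): how a client might read
 * back the residue computed above. "chan" and "cookie" are hypothetical.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes left\n", state.residue);
 */
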
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
					 node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2,
	 * and ylen (number of frames - 1) must be at least 0.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

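/*
 * Illustrative sketch (not from this driver): prima2 accepts only one
 * chunk per frame, so a caller would fill the interleaved template
 * roughly as below. buf_phys, nr_frames, line_bytes and gap_bytes are
 * hypothetical; sizes must be multiples of SIRFSOC_DMA_WORD_LEN.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->src_start = buf_phys;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->frame_size = 1;
 *	xt->numf = nr_frames;
 *	xt->sgl[0].size = line_bytes;
 *	xt->sgl[0].icg = gap_bytes;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, 0);
 */
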
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB, and the controller raises an interrupt twice per loop:
	 * once when the DMA address reaches the end of BUFA and once at the
	 * end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
					 node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

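/*
 * Illustrative sketch (not from this driver): since exactly two periods
 * are supported, buf_len must be 2 * period_len. An audio-style client
 * might do the following; all names are hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, dma_buf,
 *					 2 * period_bytes, period_bytes,
 *					 DMA_MEM_TO_DEV, 0);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = period_elapsed;	(fires once per period)
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
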
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

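/*
 * Illustrative sketch (not from this driver): a non-DT client could
 * claim a specific channel through the filter above. The global channel
 * number 12 is hypothetical.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */
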
#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
					     struct of_dma *ofdma)
{
	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= SIRFSOC_DMA_CHANNELS)
		return NULL;

	return dma_get_slave_channel(&sdma->channels[request].chan);
}

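/*
 * Illustrative sketch (not taken from this driver's binding document):
 * with the single-cell xlate above, a device-tree consumer selects a
 * channel by number. The node and numbers below are made up.
 *
 *	uart2: serial@b0070000 {
 *		dmas = <&dmac0 6>, <&dmac0 7>;
 *		dma-names = "rx", "tx";
 *	};
 */
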
static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dmadata *data;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	data = (struct sirfsoc_dmadata *)
		(of_match_device(op->dev.driver->of_match_table,
				 &op->dev)->data);
	sdma->exec_desc = data->exec;
	sdma->type = data->type;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Failed to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (!sdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_config = sirfsoc_dma_slave_config;
	dma->device_pause = sirfsoc_dma_pause_chan;
	dma->device_resume = sirfsoc_dma_resume_chan;
	dma->device_terminate_all = sirfsoc_dma_terminate_all;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto unreg_dma_dev;
	}

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

unreg_dma_dev:
	dma_async_device_unregister(dma);
free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	of_dma_controller_free(op->dev.of_node);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	tasklet_kill(&sdma->tasklet);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;

	/*
	 * If we were runtime-suspended before, resume to enable the clock
	 * before accessing registers.
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
	}

	/*
	 * The DMA controller loses all register contents while suspended,
	 * so we need to save the registers of active channels.
	 */
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + int_offset);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;
	u32 width_offset;

	/* Enable clock before accessing registers */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
		width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
		width_offset = SIRFSOC_DMA_WIDTH_0;
	}

	writel_relaxed(save->interrupt_en, sdma->base + int_offset);
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc,
					 node);
		writel_relaxed(sdesc->width,
			sdma->base + width_offset + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
			writel_relaxed(sdesc->addr,
				sdma->base + SIRFSOC_DMA_CH_ADDR);
		} else {
			writel_relaxed(sdesc->addr >> 2,
				sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
		}
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
	.exec = sirfsoc_dma_execute_hw_a6,
	.type = SIRFSOC_DMA_VER_A6,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
	.exec = sirfsoc_dma_execute_hw_a7v1,
	.type = SIRFSOC_DMA_VER_A7V1,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
	.exec = sirfsoc_dma_execute_hw_a7v2,
	.type = SIRFSOC_DMA_VER_A7V2,
};

static const struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
	{ .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
	{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
	{},
};
MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};

static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");