// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 *  GDMA4740 DMAC support
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

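/*
 * Each GDMA channel owns a 0x10-byte register window: source address,
 * destination address, CTRL0 (transfer length, burst size, enable, done
 * interrupt) and CTRL1 (request lines, next channel, coherency). The
 * global interrupt and arbitration registers sit above the per-channel
 * windows (0x200+ on RT3883/MT7621, 0x80..0x88 on RT305X).
 */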
#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)

#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
#define GDMA_REG_CTRL0_TX_MASK		0xffff
#define GDMA_REG_CTRL0_TX_SHIFT		16
#define GDMA_REG_CTRL0_CURR_MASK	0xff
#define GDMA_REG_CTRL0_CURR_SHIFT	8
#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
#define GDMA_REG_CTRL0_BURST_MASK	0x7
#define GDMA_REG_CTRL0_BURST_SHIFT	3
#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
#define GDMA_REG_CTRL0_ENABLE		BIT(1)
#define GDMA_REG_CTRL0_SW_MODE		BIT(0)

#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
#define GDMA_REG_CTRL1_SEG_MASK		0xf
#define GDMA_REG_CTRL1_SEG_SHIFT	22
#define GDMA_REG_CTRL1_REQ_MASK		0x3f
#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
#define GDMA_REG_CTRL1_NEXT_SHIFT	3
#define GDMA_REG_CTRL1_COHERENT		BIT(2)
#define GDMA_REG_CTRL1_FAIL		BIT(1)
#define GDMA_REG_CTRL1_MASK		BIT(0)

#define GDMA_REG_UNMASK_INT		0x200
#define GDMA_REG_DONE_INT		0x204

#define GDMA_REG_GCT			0x220
#define GDMA_REG_GCT_CHAN_MASK		0x3
#define GDMA_REG_GCT_CHAN_SHIFT		3
#define GDMA_REG_GCT_VER_MASK		0x3
#define GDMA_REG_GCT_VER_SHIFT		1
#define GDMA_REG_GCT_ARBIT_RR		BIT(0)

#define GDMA_REG_REQSTS			0x2a0
#define GDMA_REG_ACKSTS			0x2a4
#define GDMA_REG_FINSTS			0x2a8

/* for RT305X gdma registers */
#define GDMA_RT305X_CTRL0_REQ_MASK	0xf
#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT	12
#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT	8

#define GDMA_RT305X_CTRL1_FAIL		BIT(4)
#define GDMA_RT305X_CTRL1_NEXT_MASK	0x7
#define GDMA_RT305X_CTRL1_NEXT_SHIFT	1

#define GDMA_RT305X_STATUS_INT		0x80
#define GDMA_RT305X_STATUS_SIGNAL	0x84
#define GDMA_RT305X_GCT			0x88

/* for MT7621 gdma registers */
#define GDMA_REG_PERF_START(x)		(0x230 + (x) * 0x8)
#define GDMA_REG_PERF_END(x)		(0x234 + (x) * 0x8)

enum gdma_dma_transfer_size {
	GDMA_TRANSFER_SIZE_4BYTE	= 0,
	GDMA_TRANSFER_SIZE_8BYTE	= 1,
	GDMA_TRANSFER_SIZE_16BYTE	= 2,
	GDMA_TRANSFER_SIZE_32BYTE	= 3,
	GDMA_TRANSFER_SIZE_64BYTE	= 4,
};

struct gdma_dma_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 len;
};

struct gdma_dma_desc {
	struct virt_dma_desc vdesc;

	enum dma_transfer_direction direction;
	bool cyclic;

	u32 residue;
	unsigned int num_sgs;
	struct gdma_dma_sg sg[];
};

struct gdma_dmaengine_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	unsigned int slave_id;

	dma_addr_t fifo_addr;
	enum gdma_dma_transfer_size burst_size;

	struct gdma_dma_desc *desc;
	unsigned int next_sg;
};

struct gdma_dma_dev {
	struct dma_device ddev;
	struct device_dma_parameters dma_parms;
	struct gdma_data *data;
	void __iomem *base;
	struct tasklet_struct task;
	volatile unsigned long chan_issued;
	atomic_t cnt;

	struct gdma_dmaengine_chan chan[];
};

struct gdma_data {
	int chancnt;
	u32 done_int_reg;
	void (*init)(struct gdma_dma_dev *dma_dev);
	int (*start_transfer)(struct gdma_dmaengine_chan *chan);
};

static struct gdma_dma_dev *gdma_dma_chan_get_dev(
	struct gdma_dmaengine_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
			    ddev);
}

static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
}

static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct gdma_dma_desc, vdesc);
}

static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
				     unsigned int reg)
{
	return readl(dma_dev->base + reg);
}

static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
				  unsigned int reg, uint32_t val)
{
	writel(val, dma_dev->base + reg);
}

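/*
 * Map the dmaengine maxburst value (expressed in bus-width units) onto
 * the hardware burst-size encoding programmed into CTRL0.
 */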
static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
	if (maxburst < 2)
		return GDMA_TRANSFER_SIZE_4BYTE;
	else if (maxburst < 4)
		return GDMA_TRANSFER_SIZE_8BYTE;
	else if (maxburst < 8)
		return GDMA_TRANSFER_SIZE_16BYTE;
	else if (maxburst < 16)
		return GDMA_TRANSFER_SIZE_32BYTE;
	else
		return GDMA_TRANSFER_SIZE_64BYTE;
}

static int gdma_dma_config(struct dma_chan *c,
			   struct dma_slave_config *config)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);

	if (config->device_fc) {
		dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
		return -EINVAL;
	}

	switch (config->direction) {
	case DMA_MEM_TO_DEV:
		if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->dst_addr;
		chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
		break;
	case DMA_DEV_TO_MEM:
		if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->src_addr;
		chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
		break;
	default:
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			config->direction);
		return -EINVAL;
	}

	return 0;
}

static int gdma_dma_terminate_all(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags, timeout;
	LIST_HEAD(head);
	unsigned int i = 0;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	chan->desc = NULL;
	clear_bit(chan->id, &dma_dev->chan_issued);
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	/* wait for the in-flight DMA transfer to complete */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
			GDMA_REG_CTRL0_ENABLE) {
		if (time_after_eq(jiffies, timeout)) {
			dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
				chan->id);
			/* restore to init value */
			gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
			break;
		}
		cpu_relax();
		i++;
	}

	if (i)
		dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
			chan->id, i);

	return 0;
}

static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev,
		"chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}

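/*
 * Program and kick one scatterlist segment on an RT305X channel. On this
 * SoC the peripheral request numbers live in CTRL0, and the fixed value 8
 * is programmed for the memory-side endpoint.
 */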
static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify chan is already stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt305x_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
			(8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
			(chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		/*
		 * TODO: the memcpy path has bugs; it sometimes copies
		 * 8 extra bytes when verified with dmatest.
		 */
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE |
			(8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else {
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev,
		"chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
		gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
		gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
		gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
		gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}

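/*
 * RT3883/MT7621 variant of the per-segment setup: the request numbers and
 * the coherency bit are carried in CTRL1 instead of CTRL0, and the value
 * 32 is programmed for the memory-side endpoint.
 */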
static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify chan is already stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt3883_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
		ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else {
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
				      struct gdma_dmaengine_chan *chan)
{
	return dma_dev->data->start_transfer(chan);
}

static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return 0;
	}
	chan->desc = to_gdma_dma_desc(vdesc);
	chan->next_sg = 0;

	return 1;
}

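/*
 * Per-channel completion handling: advance cyclic descriptors, update the
 * residue of slave/memcpy descriptors and, via chan_issued, ask the
 * tasklet to start the next segment or the next queued descriptor.
 */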
static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
			      struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_desc *desc;
	unsigned long flags;
	int chan_issued;

	chan_issued = 0;
	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc) {
		if (desc->cyclic) {
			vchan_cyclic_callback(&desc->vdesc);
			if (chan->next_sg == desc->num_sgs)
				chan->next_sg = 0;
			chan_issued = 1;
		} else {
			desc->residue -= desc->sg[chan->next_sg - 1].len;
			if (chan->next_sg == desc->num_sgs) {
				list_del(&desc->vdesc.node);
				vchan_cookie_complete(&desc->vdesc);
				chan_issued = gdma_next_desc(chan);
			} else {
				chan_issued = 1;
			}
		}
	} else {
		dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
			chan->id);
	}
	if (chan_issued)
		set_bit(chan->id, &dma_dev->chan_issued);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static irqreturn_t gdma_dma_irq(int irq, void *devid)
{
	struct gdma_dma_dev *dma_dev = devid;
	u32 done, done_reg;
	unsigned int i;

	done_reg = dma_dev->data->done_int_reg;
	done = gdma_dma_read(dma_dev, done_reg);
	if (unlikely(!done))
		return IRQ_NONE;

	/* clean done bits */
	gdma_dma_write(dma_dev, done_reg, done);

	i = 0;
	while (done) {
		if (done & 0x1) {
			gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
			atomic_dec(&dma_dev->cnt);
		}
		done >>= 1;
		i++;
	}

	/* schedule the tasklet only if there is work to do */
	if (dma_dev->chan_issued)
		tasklet_schedule(&dma_dev->task);

	return IRQ_HANDLED;
}

static void gdma_dma_issue_pending(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
		if (gdma_next_desc(chan)) {
			set_bit(chan->id, &dma_dev->chan_issued);
			tasklet_schedule(&dma_dev->task);
		} else {
			dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
				chan->id);
		}
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

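/*
 * Build a descriptor for a slave scatter/gather transfer. Each scatterlist
 * entry becomes one hardware segment; the peripheral FIFO address comes
 * from the channel's slave configuration at start_transfer time.
 */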
static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
		struct dma_chan *c, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "alloc sg desc error\n");
		return NULL;
	}
	desc->residue = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = sg_dma_address(sg);
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = sg_dma_address(sg);
		} else {
			dev_err(c->device->dev, "direction type %d error\n",
				direction);
			goto free_desc;
		}

		if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
			dev_err(c->device->dev, "sg len too large %d\n",
				sg_dma_len(sg));
			goto free_desc;
		}
		desc->sg[i].len = sg_dma_len(sg);
		desc->residue += sg_dma_len(sg);
	}

	desc->num_sgs = sg_len;
	desc->direction = direction;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

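/*
 * Memory-to-memory transfers are chopped into segments of at most
 * GDMA_REG_CTRL0_TX_MASK bytes, since that is the widest length the
 * CTRL0 transfer-count field can hold.
 */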
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;
	size_t xfer_count;

	if (len <= 0)
		return NULL;

	chan->burst_size = gdma_dma_maxburst(len >> 2);

	xfer_count = GDMA_REG_CTRL0_TX_MASK;
	num_periods = DIV_ROUND_UP(len, xfer_count);

	desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "alloc memcpy desc error\n");
		return NULL;
	}
	desc->residue = len;

	for (i = 0; i < num_periods; i++) {
		desc->sg[i].src_addr = src;
		desc->sg[i].dst_addr = dest;
		if (len > xfer_count)
			desc->sg[i].len = xfer_count;
		else
			desc->sg[i].len = len;
		src += desc->sg[i].len;
		dest += desc->sg[i].len;
		len -= desc->sg[i].len;
	}

	desc->num_sgs = num_periods;
	desc->direction = DMA_MEM_TO_MEM;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

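/*
 * Cyclic (audio-style) transfers: the ring buffer is split into equal
 * periods, one hardware segment per period; the IRQ handler wraps
 * next_sg instead of completing the descriptor.
 */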
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;

	if (buf_len % period_len)
		return NULL;

	if (period_len > GDMA_REG_CTRL0_TX_MASK) {
		dev_err(c->device->dev, "cyclic len too large %d\n",
			period_len);
		return NULL;
	}

	num_periods = buf_len / period_len;
	desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
	if (!desc) {
		dev_err(c->device->dev, "alloc cyclic desc error\n");
		return NULL;
	}
	desc->residue = buf_len;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = buf_addr;
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = buf_addr;
		} else {
			dev_err(c->device->dev, "direction type %d error\n",
				direction);
			goto free_desc;
		}
		desc->sg[i].len = period_len;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->direction = direction;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

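/*
 * Residue reporting: for the in-flight descriptor the residue is tracked
 * per completed segment; for descriptors still sitting in the queue the
 * full length is returned.
 */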
static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	struct gdma_dma_desc *desc;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc && (cookie == desc->vdesc.tx.cookie)) {
		/*
		 * We never update desc->residue in the cyclic case, so we
		 * can tell the remaining room to the end of the circular
		 * buffer.
		 */
		if (desc->cyclic)
			state->residue = desc->residue -
				((chan->next_sg - 1) * desc->sg[0].len);
		else
			state->residue = desc->residue;
	} else {
		vdesc = vchan_find_desc(&chan->vchan, cookie);
		if (vdesc)
			state->residue = to_gdma_dma_desc(vdesc)->residue;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);

	return status;
}

static void gdma_dma_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}

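/*
 * Dispatch tasklet: round-robin over the channels whose bit is set in
 * chan_issued and start their next segment. At most two transfers are
 * kept in flight (tracked by dma_dev->cnt) to work around the data
 * corruption seen on MT7621 when more channels run concurrently.
 */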
static void gdma_dma_tasklet(struct tasklet_struct *t)
{
	struct gdma_dma_dev *dma_dev = from_tasklet(dma_dev, t, task);
	struct gdma_dmaengine_chan *chan;
	static unsigned int last_chan;
	unsigned int i, chan_mask;

	/* record last chan to round robin all chans */
	i = last_chan;
	chan_mask = dma_dev->data->chancnt - 1;
	do {
		/*
		 * On MT7621, when verifying with dmatest with all
		 * channels enabled, only two channels may work at the
		 * same time, otherwise the data gets corrupted.
		 */
		if (atomic_read(&dma_dev->cnt) >= 2) {
			last_chan = i;
			break;
		}

		if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
			chan = &dma_dev->chan[i];
			if (chan->desc) {
				atomic_inc(&dma_dev->cnt);
				gdma_start_transfer(dma_dev, chan);
			} else {
				dev_dbg(dma_dev->ddev.dev,
					"chan %d no desc to issue\n",
					chan->id);
			}
			if (!dma_dev->chan_issued)
				break;
		}

		i = (i + 1) & chan_mask;
	} while (i != last_chan);
}

static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static struct gdma_data rt305x_gdma_data = {
	.chancnt = 8,
	.done_int_reg = GDMA_RT305X_STATUS_INT,
	.init = rt305x_gdma_init,
	.start_transfer = rt305x_gdma_start_transfer,
};

static struct gdma_data rt3883_gdma_data = {
	.chancnt = 16,
	.done_int_reg = GDMA_REG_DONE_INT,
	.init = rt3883_gdma_init,
	.start_transfer = rt3883_gdma_start_transfer,
};

static const struct of_device_id gdma_of_match_table[] = {
	{ .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
	{ .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
	{ },
};

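/*
 * Probe: map the registers, hook the shared interrupt, register the
 * dmaengine device and expose the channels to DT clients through
 * of_dma_xlate_by_chan_id.
 */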
static int gdma_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct gdma_dmaengine_chan *chan;
	struct gdma_dma_dev *dma_dev;
	struct dma_device *dd;
	unsigned int i;
	int ret;
	int irq;
	void __iomem *base;
	struct gdma_data *data;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	match = of_match_device(gdma_of_match_table, &pdev->dev);
	if (!match)
		return -EINVAL;
	data = (struct gdma_data *)match->data;

	dma_dev = devm_kzalloc(&pdev->dev,
			       struct_size(dma_dev, chan, data->chancnt),
			       GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;
	dma_dev->data = data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dma_dev->base = base;
	tasklet_setup(&dma_dev->task, gdma_dma_tasklet);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
			       0, dev_name(&pdev->dev), dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return ret;
	}

	device_reset(&pdev->dev);

	dd = &dma_dev->ddev;
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
	dd->device_config = gdma_dma_config;
	dd->device_terminate_all = gdma_dma_terminate_all;
	dd->device_tx_status = gdma_dma_tx_status;
	dd->device_issue_pending = gdma_dma_issue_pending;

	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	dd->dev = &pdev->dev;
	dd->dev->dma_parms = &dma_dev->dma_parms;
	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < data->chancnt; i++) {
		chan = &dma_dev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = gdma_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	/* init hardware */
	data->init(dma_dev);

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(&pdev->dev, "failed to register dma device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register of dma controller\n");
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dma_dev);

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
	return ret;
}

static int gdma_dma_remove(struct platform_device *pdev)
{
	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);

	tasklet_kill(&dma_dev->task);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dma_dev->ddev);

	return 0;
}

static struct platform_driver gdma_dma_driver = {
	.probe = gdma_dma_probe,
	.remove = gdma_dma_remove,
	.driver = {
		.name = "gdma-rt2880",
		.of_match_table = gdma_of_match_table,
	},
};
module_platform_driver(gdma_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");