/*
 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 * GDMA4740 DMAC support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/of_dma.h>
#include <linux/reset.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)

#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
#define GDMA_REG_CTRL0_TX_MASK		0xffff
#define GDMA_REG_CTRL0_TX_SHIFT		16
#define GDMA_REG_CTRL0_CURR_MASK	0xff
#define GDMA_REG_CTRL0_CURR_SHIFT	8
#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
#define GDMA_REG_CTRL0_BURST_MASK	0x7
#define GDMA_REG_CTRL0_BURST_SHIFT	3
#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
#define GDMA_REG_CTRL0_ENABLE		BIT(1)
#define GDMA_REG_CTRL0_SW_MODE		BIT(0)

#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
#define GDMA_REG_CTRL1_SEG_MASK		0xf
#define GDMA_REG_CTRL1_SEG_SHIFT	22
#define GDMA_REG_CTRL1_REQ_MASK		0x3f
#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
#define GDMA_REG_CTRL1_CONTINOUS	BIT(14)
#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
#define GDMA_REG_CTRL1_NEXT_SHIFT	3
#define GDMA_REG_CTRL1_COHERENT		BIT(2)
#define GDMA_REG_CTRL1_FAIL		BIT(1)
#define GDMA_REG_CTRL1_MASK		BIT(0)

#define GDMA_REG_UNMASK_INT		0x200
#define GDMA_REG_DONE_INT		0x204

#define GDMA_REG_GCT			0x220
#define GDMA_REG_GCT_CHAN_MASK		0x3
#define GDMA_REG_GCT_CHAN_SHIFT		3
#define GDMA_REG_GCT_VER_MASK		0x3
#define GDMA_REG_GCT_VER_SHIFT		1
#define GDMA_REG_GCT_ARBIT_RR		BIT(0)

#define GDMA_REG_REQSTS			0x2a0
#define GDMA_REG_ACKSTS			0x2a4
#define GDMA_REG_FINSTS			0x2a8

/* for RT305X gdma registers */
#define GDMA_RT305X_CTRL0_REQ_MASK	0xf
#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT	12
#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT	8

#define GDMA_RT305X_CTRL1_FAIL		BIT(4)
#define GDMA_RT305X_CTRL1_NEXT_MASK	0x7
#define GDMA_RT305X_CTRL1_NEXT_SHIFT	1

#define GDMA_RT305X_STATUS_INT		0x80
#define GDMA_RT305X_STATUS_SIGNAL	0x84
#define GDMA_RT305X_GCT			0x88

/* for MT7621 gdma registers */
#define GDMA_REG_PERF_START(x)		(0x230 + (x) * 0x8)
#define GDMA_REG_PERF_END(x)		(0x234 + (x) * 0x8)

enum gdma_dma_transfer_size {
	GDMA_TRANSFER_SIZE_4BYTE	= 0,
	GDMA_TRANSFER_SIZE_8BYTE	= 1,
	GDMA_TRANSFER_SIZE_16BYTE	= 2,
	GDMA_TRANSFER_SIZE_32BYTE	= 3,
	GDMA_TRANSFER_SIZE_64BYTE	= 4,
};

struct gdma_dma_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 len;
};

struct gdma_dma_desc {
	struct virt_dma_desc vdesc;

	enum dma_transfer_direction direction;
	bool cyclic;

	u32 residue;
	unsigned int num_sgs;
	struct gdma_dma_sg sg[];
};

struct gdma_dmaengine_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	unsigned int slave_id;

	dma_addr_t fifo_addr;
	enum gdma_dma_transfer_size burst_size;

	struct gdma_dma_desc *desc;
	unsigned int next_sg;
};

struct gdma_dma_dev {
	struct dma_device ddev;
	struct device_dma_parameters dma_parms;
	struct gdma_data *data;
	void __iomem *base;
	struct tasklet_struct task;
	volatile unsigned long chan_issued;
	atomic_t cnt;

	struct gdma_dmaengine_chan chan[];
};

struct gdma_data {
	int chancnt;
	u32 done_int_reg;
	void (*init)(struct gdma_dma_dev *dma_dev);
	int (*start_transfer)(struct gdma_dmaengine_chan *chan);
};

static struct gdma_dma_dev *gdma_dma_chan_get_dev(
	struct gdma_dmaengine_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
		ddev);
}

static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
}

static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct gdma_dma_desc, vdesc);
}

static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
				     unsigned int reg)
{
	return readl(dma_dev->base + reg);
}

static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
				  unsigned int reg, uint32_t val)
{
	writel(val, dma_dev->base + reg);
}

static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
{
	return kzalloc(sizeof(struct gdma_dma_desc) +
		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
}

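/*
 * Map a dma_slave_config maxburst value (counted in units of the 4 byte
 * bus width) onto the hardware burst-size field.
 */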
static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
{
	if (maxburst < 2)
		return GDMA_TRANSFER_SIZE_4BYTE;
	else if (maxburst < 4)
		return GDMA_TRANSFER_SIZE_8BYTE;
	else if (maxburst < 8)
		return GDMA_TRANSFER_SIZE_16BYTE;
	else if (maxburst < 16)
		return GDMA_TRANSFER_SIZE_32BYTE;
	else
		return GDMA_TRANSFER_SIZE_64BYTE;
}

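/*
 * Capture the slave configuration. Only a 4 byte bus width is supported and
 * peripheral flow control (device_fc) is rejected.
 */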
static int gdma_dma_config(struct dma_chan *c,
			   struct dma_slave_config *config)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);

	if (config->device_fc) {
		dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
		return -EINVAL;
	}

	switch (config->direction) {
	case DMA_MEM_TO_DEV:
		if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->dst_addr;
		chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
		break;
	case DMA_DEV_TO_MEM:
		if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
			return -EINVAL;
		}
		chan->slave_id = config->slave_id;
		chan->fifo_addr = config->src_addr;
		chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
		break;
	default:
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			config->direction);
		return -EINVAL;
	}

	return 0;
}

static int gdma_dma_terminate_all(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags, timeout;
	LIST_HEAD(head);
	int i = 0;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	chan->desc = NULL;
	clear_bit(chan->id, &dma_dev->chan_issued);
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	/* wait for the in-flight DMA transfer to complete */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
			GDMA_REG_CTRL0_ENABLE) {
		if (time_after_eq(jiffies, timeout)) {
			dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
				chan->id);
			/* restore to init value */
			gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
			break;
		}
		cpu_relax();
		i++;
	}

	if (i)
		dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
			chan->id, i);

	return 0;
}

static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
}

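/*
 * Program the next scatterlist segment into an RT305x channel. On this
 * version of the controller the request numbers live in CTRL0; CTRL0 is
 * written last because it also carries the enable bit.
 */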
static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify chan is already stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt305x_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
			(8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
			(chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
			(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		/*
		 * TODO: the memcpy function still has bugs. It sometimes
		 * copies 8 bytes too many when verified with dmatest.
		 */
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE |
			(8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else {
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
{
	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
		id,
		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
		gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
		gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
		gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
		gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
		gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
}

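/*
 * Program the next scatterlist segment into an RT3883-style channel. Unlike
 * RT305x, the request numbers are held in CTRL1; CTRL0 is still written
 * last because it carries the enable bit.
 */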
static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	dma_addr_t src_addr, dst_addr;
	struct gdma_dma_sg *sg;
	uint32_t ctrl0, ctrl1;

	/* verify chan is already stopped */
	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
			chan->id, ctrl0);
		rt3883_dump_reg(dma_dev, chan->id);
		return -EINVAL;
	}

	sg = &chan->desc->sg[chan->next_sg];
	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->src_addr;
		dst_addr = chan->fifo_addr;
		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
		src_addr = chan->fifo_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
		ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
		src_addr = sg->src_addr;
		dst_addr = sg->dst_addr;
		ctrl0 = GDMA_REG_CTRL0_SW_MODE;
		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
			GDMA_REG_CTRL1_COHERENT;
	} else {
		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
			chan->desc->direction);
		return -EINVAL;
	}

	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
		 (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
		 GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;

	chan->next_sg++;
	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);

	/* make sure next_sg is updated before the channel is enabled */
	wmb();
	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);

	return 0;
}

static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
				      struct gdma_dmaengine_chan *chan)
{
	return dma_dev->data->start_transfer(chan);
}

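/*
 * Install the next queued virt-dma descriptor on the channel. Returns 1 if
 * a descriptor was found, 0 if the queue is empty.
 */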
static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return 0;
	}
	chan->desc = to_gdma_dma_desc(vdesc);
	chan->next_sg = 0;

	return 1;
}

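/*
 * Per-channel completion handling: advance cyclic transfers, update the
 * residue, complete finished descriptors and re-mark the channel as issued
 * when more segments remain.
 */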
static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
			      struct gdma_dmaengine_chan *chan)
{
	struct gdma_dma_desc *desc;
	unsigned long flags;
	int chan_issued;

	chan_issued = 0;
	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc) {
		if (desc->cyclic) {
			vchan_cyclic_callback(&desc->vdesc);
			if (chan->next_sg == desc->num_sgs)
				chan->next_sg = 0;
			chan_issued = 1;
		} else {
			desc->residue -= desc->sg[chan->next_sg - 1].len;
			if (chan->next_sg == desc->num_sgs) {
				list_del(&desc->vdesc.node);
				vchan_cookie_complete(&desc->vdesc);
				chan_issued = gdma_next_desc(chan);
			} else {
				chan_issued = 1;
			}
		}
	} else {
		dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
			chan->id);
	}
	if (chan_issued)
		set_bit(chan->id, &dma_dev->chan_issued);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

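/*
 * Interrupt handler: acknowledge the done bits, dispatch per-channel
 * completion handling and kick the tasklet if more work is pending.
 */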
static irqreturn_t gdma_dma_irq(int irq, void *devid)
{
	struct gdma_dma_dev *dma_dev = devid;
	u32 done, done_reg;
	unsigned int i;

	done_reg = dma_dev->data->done_int_reg;
	done = gdma_dma_read(dma_dev, done_reg);
	if (unlikely(!done))
		return IRQ_NONE;

	/* clean done bits */
	gdma_dma_write(dma_dev, done_reg, done);

	i = 0;
	while (done) {
		if (done & 0x1) {
			gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
			atomic_dec(&dma_dev->cnt);
		}
		done >>= 1;
		i++;
	}

	/* schedule the tasklet only when there is work to do */
	if (dma_dev->chan_issued)
		tasklet_schedule(&dma_dev->task);

	return IRQ_HANDLED;
}

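/* Start the channel from process context by handing it to the tasklet. */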
static void gdma_dma_issue_pending(struct dma_chan *c)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
		if (gdma_next_desc(chan)) {
			set_bit(chan->id, &dma_dev->chan_issued);
			tasklet_schedule(&dma_dev->task);
		} else {
			dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
				chan->id);
		}
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

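/*
 * Build a slave scatter/gather descriptor. Each scatterlist entry becomes
 * one hardware segment, limited to GDMA_REG_CTRL0_TX_MASK bytes.
 */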
static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
		struct dma_chan *c, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = gdma_dma_alloc_desc(sg_len);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate sg descriptor\n");
		return NULL;
	}
	desc->residue = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = sg_dma_address(sg);
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = sg_dma_address(sg);
		} else {
			dev_err(c->device->dev, "direction type %d error\n",
				direction);
			goto free_desc;
		}

		if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
			dev_err(c->device->dev, "sg len too large %d\n",
				sg_dma_len(sg));
			goto free_desc;
		}
		desc->sg[i].len = sg_dma_len(sg);
		desc->residue += sg_dma_len(sg);
	}

	desc->num_sgs = sg_len;
	desc->direction = direction;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

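/*
 * Build a memcpy descriptor by splitting the copy into segments of at most
 * GDMA_REG_CTRL0_TX_MASK bytes each.
 */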
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;
	size_t xfer_count;

	if (!len)
		return NULL;

	chan->burst_size = gdma_dma_maxburst(len >> 2);

	xfer_count = GDMA_REG_CTRL0_TX_MASK;
	num_periods = DIV_ROUND_UP(len, xfer_count);

	desc = gdma_dma_alloc_desc(num_periods);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate memcpy descriptor\n");
		return NULL;
	}
	desc->residue = len;

	for (i = 0; i < num_periods; i++) {
		desc->sg[i].src_addr = src;
		desc->sg[i].dst_addr = dest;
		if (len > xfer_count)
			desc->sg[i].len = xfer_count;
		else
			desc->sg[i].len = len;
		src += desc->sg[i].len;
		dest += desc->sg[i].len;
		len -= desc->sg[i].len;
	}

	desc->num_sgs = num_periods;
	desc->direction = DMA_MEM_TO_MEM;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

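/*
 * Build a cyclic descriptor: one segment per period, restarted from the
 * first segment by the completion handler.
 */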
static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct gdma_dma_desc *desc;
	unsigned int num_periods, i;

	if (buf_len % period_len)
		return NULL;

	if (period_len > GDMA_REG_CTRL0_TX_MASK) {
		dev_err(c->device->dev, "cyclic len too large %zu\n",
			period_len);
		return NULL;
	}

	num_periods = buf_len / period_len;
	desc = gdma_dma_alloc_desc(num_periods);
	if (!desc) {
		dev_err(c->device->dev, "failed to allocate cyclic descriptor\n");
		return NULL;
	}
	desc->residue = buf_len;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			desc->sg[i].src_addr = buf_addr;
		} else if (direction == DMA_DEV_TO_MEM) {
			desc->sg[i].dst_addr = buf_addr;
		} else {
			dev_err(c->device->dev, "direction type %d error\n",
				direction);
			goto free_desc;
		}
		desc->sg[i].len = period_len;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->direction = direction;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

free_desc:
	kfree(desc);
	return NULL;
}

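/*
 * Report the transfer status. The residue of the currently running
 * descriptor is estimated from the segments that have not completed yet.
 */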
static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	struct gdma_dma_desc *desc;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	desc = chan->desc;
	if (desc && (cookie == desc->vdesc.tx.cookie)) {
		/*
		 * We never update desc->residue in the cyclic case, so we
		 * can tell the remaining room to the end of the circular
		 * buffer.
		 */
		if (desc->cyclic)
			state->residue = desc->residue -
				((chan->next_sg - 1) * desc->sg[0].len);
		else
			state->residue = desc->residue;
	} else {
		vdesc = vchan_find_desc(&chan->vchan, cookie);
		if (vdesc)
			state->residue = to_gdma_dma_desc(vdesc)->residue;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);

	return status;
}

static void gdma_dma_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}

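/*
 * Tasklet that starts transfers on every channel whose issued bit is set,
 * round-robining from the last channel served.
 */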
static void gdma_dma_tasklet(unsigned long arg)
{
	struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
	struct gdma_dmaengine_chan *chan;
	static unsigned int last_chan;
	unsigned int i, chan_mask;

	/* resume from the last channel to round-robin across all channels */
	i = last_chan;
	chan_mask = dma_dev->data->chancnt - 1;
	do {
		/*
		 * On MT7621, when dmatest runs with every channel enabled,
		 * only two channels may be working at the same time,
		 * otherwise the transferred data gets corrupted.
		 */
		if (atomic_read(&dma_dev->cnt) >= 2) {
			last_chan = i;
			break;
		}

		if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
			chan = &dma_dev->chan[i];
			if (chan->desc) {
				atomic_inc(&dma_dev->cnt);
				gdma_start_transfer(dma_dev, chan);
			} else {
				dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
					chan->id);
			}
			if (!dma_dev->chan_issued)
				break;
		}

		i = (i + 1) & chan_mask;
	} while (i != last_chan);
}

static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
{
	uint32_t gct;

	/* all chans round robin */
	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);

	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
		 (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
		 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
			GDMA_REG_GCT_CHAN_MASK));
}

static struct gdma_data rt305x_gdma_data = {
	.chancnt = 8,
	.done_int_reg = GDMA_RT305X_STATUS_INT,
	.init = rt305x_gdma_init,
	.start_transfer = rt305x_gdma_start_transfer,
};

static struct gdma_data rt3883_gdma_data = {
	.chancnt = 16,
	.done_int_reg = GDMA_REG_DONE_INT,
	.init = rt3883_gdma_init,
	.start_transfer = rt3883_gdma_start_transfer,
};

static const struct of_device_id gdma_of_match_table[] = {
	{ .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
	{ .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
	{ },
};

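/*
 * Probe: map the registers, request the interrupt, register the dmaengine
 * device and expose the channels through of_dma_xlate_by_chan_id (one DMA
 * specifier cell selecting the channel).
 *
 * Illustrative device tree node only; the address, size and interrupt
 * number below are placeholders, check the SoC dtsi for the real values:
 *
 *	gdma: dma-controller@10002800 {
 *		compatible = "ralink,rt305x-gdma";
 *		reg = <0x10002800 0x100>;
 *		interrupts = <15>;
 *		#dma-cells = <1>;
 *	};
 */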
static int gdma_dma_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct gdma_dmaengine_chan *chan;
	struct gdma_dma_dev *dma_dev;
	struct dma_device *dd;
	unsigned int i;
	struct resource *res;
	int ret;
	int irq;
	void __iomem *base;
	struct gdma_data *data;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	match = of_match_device(gdma_of_match_table, &pdev->dev);
	if (!match)
		return -EINVAL;
	data = (struct gdma_data *)match->data;

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
			(sizeof(struct gdma_dmaengine_chan) * data->chancnt),
			GFP_KERNEL);
	if (!dma_dev) {
		dev_err(&pdev->dev, "alloc dma device failed\n");
		return -ENOMEM;
	}
	dma_dev->data = data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dma_dev->base = base;
	tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return -EINVAL;
	}
	ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
			       0, dev_name(&pdev->dev), dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return ret;
	}

	device_reset(&pdev->dev);

	dd = &dma_dev->ddev;
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
	dd->device_config = gdma_dma_config;
	dd->device_terminate_all = gdma_dma_terminate_all;
	dd->device_tx_status = gdma_dma_tx_status;
	dd->device_issue_pending = gdma_dma_issue_pending;

	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	dd->dev = &pdev->dev;
	dd->dev->dma_parms = &dma_dev->dma_parms;
	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < data->chancnt; i++) {
		chan = &dma_dev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = gdma_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	/* init hardware */
	data->init(dma_dev);

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(&pdev->dev, "failed to register dma device\n");
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register of dma controller\n");
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dma_dev);

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
	return ret;
}

static int gdma_dma_remove(struct platform_device *pdev)
{
	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);

	tasklet_kill(&dma_dev->task);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dma_dev->ddev);

	return 0;
}

static struct platform_driver gdma_dma_driver = {
	.probe = gdma_dma_probe,
	.remove = gdma_dma_remove,
	.driver = {
		.name = "gdma-rt2880",
		.of_match_table = gdma_of_match_table,
	},
};
module_platform_driver(gdma_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");