// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver For Marvell Two-channel DMA Engine
 *
 * Copyright: Marvell International Ltd.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "dmaengine.h"
/*
 * Two-Channel DMA registers
 */
#define TDBCR		0x00	/* Byte Count */
#define TDSAR		0x10	/* Src Addr */
#define TDDAR		0x20	/* Dst Addr */
#define TDNDPR		0x30	/* Next Desc */
#define TDCR		0x40	/* Control */
#define TDCP		0x60	/* Priority */
#define TDCDPR		0x70	/* Current Desc */
#define TDIMR		0x80	/* Int Mask */
#define TDISR		0xa0	/* Int Status */
/* Two-Channel DMA Control Register */
#define TDCR_SSZ_8_BITS		(0x0 << 22)	/* Sample Size */
#define TDCR_SSZ_12_BITS	(0x1 << 22)
#define TDCR_SSZ_16_BITS	(0x2 << 22)
#define TDCR_SSZ_20_BITS	(0x3 << 22)
#define TDCR_SSZ_24_BITS	(0x4 << 22)
#define TDCR_SSZ_32_BITS	(0x5 << 22)
#define TDCR_SSZ_SHIFT		(0x1 << 22)
#define TDCR_SSZ_MASK		(0x7 << 22)
#define TDCR_SSPMOD		(0x1 << 21)	/* SSP MOD */
#define TDCR_ABR		(0x1 << 20)	/* Channel Abort */
#define TDCR_CDE		(0x1 << 17)	/* Close Desc Enable */
#define TDCR_PACKMOD		(0x1 << 16)	/* Pack Mode (ADMA Only) */
#define TDCR_CHANACT		(0x1 << 14)	/* Channel Active */
#define TDCR_FETCHND		(0x1 << 13)	/* Fetch Next Desc */
#define TDCR_CHANEN		(0x1 << 12)	/* Channel Enable */
#define TDCR_INTMODE		(0x1 << 10)	/* Interrupt Mode */
#define TDCR_CHAINMOD		(0x1 << 9)	/* Chain Mode */
#define TDCR_BURSTSZ_MSK	(0x7 << 6)	/* Burst Size */
#define TDCR_BURSTSZ_4B		(0x0 << 6)
#define TDCR_BURSTSZ_8B		(0x1 << 6)
#define TDCR_BURSTSZ_16B	(0x3 << 6)
#define TDCR_BURSTSZ_32B	(0x6 << 6)
#define TDCR_BURSTSZ_64B	(0x7 << 6)
#define TDCR_BURSTSZ_SQU_1B	(0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B	(0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B	(0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B	(0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B	(0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
#define TDCR_BURSTSZ_128B	(0x5 << 6)
#define TDCR_DSTDIR_MSK		(0x3 << 4)	/* Dst Direction */
#define TDCR_DSTDIR_ADDR_HOLD	(0x2 << 4)	/* Dst Addr Hold */
#define TDCR_DSTDIR_ADDR_INC	(0x0 << 4)	/* Dst Addr Increment */
#define TDCR_SRCDIR_MSK		(0x3 << 2)	/* Src Direction */
#define TDCR_SRCDIR_ADDR_HOLD	(0x2 << 2)	/* Src Addr Hold */
#define TDCR_SRCDIR_ADDR_INC	(0x0 << 2)	/* Src Addr Increment */
#define TDCR_DSTDESCCONT	(0x1 << 1)
#define TDCR_SRCDESTCONT	(0x1 << 0)

/* Two-Channel DMA Int Mask Register */
#define TDIMR_COMP		(0x1 << 0)

/* Two-Channel DMA Int Status Register */
#define TDISR_COMP		(0x1 << 0)
/*
 * Two-Channel DMA Descriptor Struct
 * NOTE: desc's buf must be aligned to 16 bytes.
 */
struct mmp_tdma_desc {
	u32 byte_cnt;
	u32 src_addr;
	u32 dst_addr;
	u32 nxt_desc;
};

enum mmp_tdma_type {
	MMP_AUD_TDMA = 0,
	PXA910_SQU,
};

#define TDMA_MAX_XFER_BYTES	SZ_64K
struct mmp_tdma_chan {
	struct device			*dev;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;

	struct mmp_tdma_desc		*desc_arr;
	dma_addr_t			desc_arr_phys;
	int				desc_num;
	enum dma_transfer_direction	dir;
	dma_addr_t			dev_addr;
	u32				burst_sz;
	enum dma_slave_buswidth		buswidth;
	enum dma_status			status;
	struct dma_slave_config		slave_config;

	int				idx;
	enum mmp_tdma_type		type;
	int				irq;
	void __iomem			*reg_base;

	size_t				buf_len;
	size_t				period_len;
	size_t				pos;

	struct gen_pool			*pool;
};
#define TDMA_CHANNEL_NUM 2
struct mmp_tdma_device {
	struct device			*dev;
	void __iomem			*base;
	struct dma_device		device;
	struct mmp_tdma_chan		*tdmac[TDMA_CHANNEL_NUM];
};

#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg);
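/*
 * Point the channel at its descriptor chain: program the next-descriptor
 * register, then set TDCR_FETCHND so the engine fetches it.
 */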
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
	writel(phys, tdmac->reg_base + TDNDPR);
	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
					tdmac->reg_base + TDCR);
}
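/* Unmask (enable) or mask the channel's completion interrupt via TDIMR. */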
static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
{
	if (enable)
		writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
	else
		writel(0, tdmac->reg_base + TDIMR);
}
static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
	/* enable dma chan */
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}
static int mmp_tdma_disable_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	u32 tdcr;

	tdcr = readl(tdmac->reg_base + TDCR);
	tdcr |= TDCR_ABR;
	tdcr &= ~TDCR_CHANEN;
	writel(tdcr, tdmac->reg_base + TDCR);

	tdmac->status = DMA_COMPLETE;

	return 0;
}
static int mmp_tdma_resume_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;

	return 0;
}
static int mmp_tdma_pause_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;

	return 0;
}
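/*
 * Translate the cached slave config into a TDCR value: address
 * hold/increment by transfer direction, then the controller-specific
 * burst-size and sample-size encodings (audio TDMA vs. PXA910 SQU).
 */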
static int mmp_tdma_config_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	unsigned int tdcr = 0;

	mmp_tdma_disable_chan(chan);

	if (tdmac->dir == DMA_MEM_TO_DEV)
		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
	else if (tdmac->dir == DMA_DEV_TO_MEM)
		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;

	if (tdmac->type == MMP_AUD_TDMA) {
		tdcr |= TDCR_PACKMOD;

		switch (tdmac->burst_sz) {
		case 4:
			tdcr |= TDCR_BURSTSZ_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_32B;
			break;
		case 64:
			tdcr |= TDCR_BURSTSZ_64B;
			break;
		case 128:
			tdcr |= TDCR_BURSTSZ_128B;
			break;
		default:
			dev_err(tdmac->dev, "unknown burst size.\n");
			return -EINVAL;
		}

		switch (tdmac->buswidth) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			tdcr |= TDCR_SSZ_8_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			tdcr |= TDCR_SSZ_16_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			tdcr |= TDCR_SSZ_32_BITS;
			break;
		default:
			dev_err(tdmac->dev, "unknown bus size.\n");
			return -EINVAL;
		}
	} else if (tdmac->type == PXA910_SQU) {
		tdcr |= TDCR_SSPMOD;

		switch (tdmac->burst_sz) {
		case 1:
			tdcr |= TDCR_BURSTSZ_SQU_1B;
			break;
		case 2:
			tdcr |= TDCR_BURSTSZ_SQU_2B;
			break;
		case 4:
			tdcr |= TDCR_BURSTSZ_SQU_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_SQU_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_SQU_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_SQU_32B;
			break;
		default:
			dev_err(tdmac->dev, "unknown burst size.\n");
			return -EINVAL;
		}
	}

	writel(tdcr, tdmac->reg_base + TDCR);
	return 0;
}
static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
{
	u32 reg = readl(tdmac->reg_base + TDISR);

	if (reg & TDISR_COMP) {
		/* clear irq */
		reg &= ~TDISR_COMP;
		writel(reg, tdmac->reg_base + TDISR);

		return 0;
	}
	return -EAGAIN;
}
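/*
 * Current in-buffer position: channel 0 tracks the live source address
 * (TDSAR), channel 1 the destination address (TDDAR), each taken
 * relative to the first descriptor's buffer base.
 */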
static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
{
	size_t reg;

	if (tdmac->idx == 0) {
		reg = __raw_readl(tdmac->reg_base + TDSAR);
		reg -= tdmac->desc_arr[0].src_addr;
	} else if (tdmac->idx == 1) {
		reg = __raw_readl(tdmac->reg_base + TDDAR);
		reg -= tdmac->desc_arr[0].dst_addr;
	} else
		return -EINVAL;

	return reg;
}
static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_tdma_chan *tdmac = dev_id;

	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
		tasklet_schedule(&tdmac->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
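/*
 * Shared top-level handler: demultiplex one device interrupt across
 * both channels and report IRQ_HANDLED if any channel claimed it.
 */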
static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
{
	struct mmp_tdma_device *tdev = dev_id;
	int i, ret;
	int irq_num = 0;

	for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
		struct mmp_tdma_chan *tdmac = tdev->tdmac[i];

		ret = mmp_tdma_chan_handler(irq, tdmac);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);

	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (gpool && tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;
	if (tdmac->status == DMA_ERROR)
		tdmac->status = DMA_COMPLETE;
}
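/*
 * tx_submit only points the hardware at the channel's descriptor
 * array; this driver does not track cookies and returns 0 here.
 */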
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	return 0;
}
static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, 0, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}
static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
}
static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (!gpool)
		return NULL;

	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);

	return tdmac->desc_arr;
}
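/*
 * Build a circular descriptor list for cyclic transfers: one descriptor
 * per period, with the last descriptor's nxt_desc wrapping back to the
 * first so the engine loops until the channel is terminated.
 */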
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (!is_slave_direction(direction)) {
		dev_err(tdmac->dev, "unsupported transfer direction\n");
		return NULL;
	}

	if (tdmac->status != DMA_COMPLETE) {
		dev_err(tdmac->dev, "controller busy");
		return NULL;
	}

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
				sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	/* enable interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		mmp_tdma_enable_irq(tdmac, true);

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;
	tdmac->pos = 0;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}
static int mmp_tdma_terminate_all(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_disable_chan(chan);
	/* disable interrupt */
	mmp_tdma_enable_irq(tdmac, false);

	return 0;
}
static int mmp_tdma_config(struct dma_chan *chan,
			   struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}
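/*
 * mmp_tdma_config() above only caches the slave config; this writer
 * applies it for the actual transfer direction once a descriptor is
 * being prepared.
 */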
static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (dir == DMA_DEV_TO_MEM) {
		tdmac->dev_addr = dmaengine_cfg->src_addr;
		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
		tdmac->buswidth = dmaengine_cfg->src_addr_width;
	} else {
		tdmac->dev_addr = dmaengine_cfg->dst_addr;
		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
	}
	tdmac->dir = dir;

	return mmp_tdma_config_chan(chan);
}
static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	tdmac->pos = mmp_tdma_get_pos(tdmac);
	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 tdmac->buf_len - tdmac->pos);

	return tdmac->status;
}
static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_enable_chan(tdmac);
}
static int mmp_tdma_remove(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	return 0;
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
			      int idx, int irq,
			      int type, struct gen_pool *pool)
{
	struct mmp_tdma_chan *tdmac;

	if (idx >= TDMA_CHANNEL_NUM) {
		dev_err(tdev->dev, "too many channels for device!\n");
		return -EINVAL;
	}

	/* alloc channel */
	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
	if (!tdmac)
		return -ENOMEM;

	if (irq)
		tdmac->irq = irq;
	tdmac->dev = tdev->dev;
	tdmac->chan.device = &tdev->device;
	tdmac->idx = idx;
	tdmac->type = type;
	tdmac->reg_base = tdev->base + idx * 4;
	tdmac->pool = pool;
	tdmac->status = DMA_COMPLETE;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_setup(&tdmac->tasklet, dma_do_tasklet);

	/* add the channel to tdma_chan list */
	list_add_tail(&tdmac->chan.device_node,
			&tdev->device.channels);
	return 0;
}
struct mmp_tdma_filter_param {
	unsigned int chan_id;
};

static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mmp_tdma_filter_param *param = fn_param;

	if (chan->chan_id != param->chan_id)
		return false;

	return true;
}
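/*
 * OF translation: the single specifier cell selects the channel index,
 * which the filter above matches against chan_id.
 */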
static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
	dma_cap_mask_t mask = tdev->device.cap_mask;
	struct mmp_tdma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= TDMA_CHANNEL_NUM)
		return NULL;

	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
				     ofdma->of_node);
}
static const struct of_device_id mmp_tdma_dt_ids[] = {
	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
	{}
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
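/*
 * The device may expose either one IRQ per channel or a single shared
 * line; probe counts the usable lines and picks per-channel handlers or
 * the shared demultiplexer accordingly.
 */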
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have couple channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, IRQF_SHARED, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (type == MMP_AUD_TDMA) {
		tdev->device.max_burst = SZ_128;
		tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	} else if (type == PXA910_SQU) {
		tdev->device.max_burst = SZ_32;
	}
	tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdev->device.descriptor_reuse = true;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dmaenginem_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
							mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			return ret;
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
static const struct platform_device_id mmp_tdma_id_table[] = {
	{ "mmp-adma", MMP_AUD_TDMA },
	{ "pxa910-squ", PXA910_SQU },
	{ },
};
static struct platform_driver mmp_tdma_driver = {
	.driver		= {
		.name	= "mmp-tdma",
		.of_match_table = mmp_tdma_dt_ids,
	},
	.id_table	= mmp_tdma_id_table,
	.probe		= mmp_tdma_probe,
	.remove		= mmp_tdma_remove,
};
module_platform_driver(mmp_tdma_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");