/*
 * Driver For Marvell Two-channel DMA Engine
 *
 * Copyright: Marvell International Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "dmaengine.h"
/*
 * Two-Channel DMA registers
 */
#define TDBCR	0x00	/* Byte Count */
#define TDSAR	0x10	/* Src Addr */
#define TDDAR	0x20	/* Dst Addr */
#define TDNDPR	0x30	/* Next Desc */
#define TDCR	0x40	/* Control */
#define TDCP	0x60	/* Priority */
#define TDCDPR	0x70	/* Current Desc */
#define TDIMR	0x80	/* Int Mask */
#define TDISR	0xa0	/* Int Status */
/* Two-Channel DMA Control Register */
#define TDCR_SSZ_8_BITS		(0x0 << 22)	/* Sample Size */
#define TDCR_SSZ_12_BITS	(0x1 << 22)
#define TDCR_SSZ_16_BITS	(0x2 << 22)
#define TDCR_SSZ_20_BITS	(0x3 << 22)
#define TDCR_SSZ_24_BITS	(0x4 << 22)
#define TDCR_SSZ_32_BITS	(0x5 << 22)
#define TDCR_SSZ_SHIFT		(0x1 << 22)
#define TDCR_SSZ_MASK		(0x7 << 22)
#define TDCR_SSPMOD		(0x1 << 21)	/* SSP MOD */
#define TDCR_ABR		(0x1 << 20)	/* Channel Abort */
#define TDCR_CDE		(0x1 << 17)	/* Close Desc Enable */
#define TDCR_PACKMOD		(0x1 << 16)	/* Pack Mode (ADMA Only) */
#define TDCR_CHANACT		(0x1 << 14)	/* Channel Active */
#define TDCR_FETCHND		(0x1 << 13)	/* Fetch Next Desc */
#define TDCR_CHANEN		(0x1 << 12)	/* Channel Enable */
#define TDCR_INTMODE		(0x1 << 10)	/* Interrupt Mode */
#define TDCR_CHAINMOD		(0x1 << 9)	/* Chain Mode */
#define TDCR_BURSTSZ_MSK	(0x7 << 6)	/* Burst Size */
#define TDCR_BURSTSZ_4B		(0x0 << 6)
#define TDCR_BURSTSZ_8B		(0x1 << 6)
#define TDCR_BURSTSZ_16B	(0x3 << 6)
#define TDCR_BURSTSZ_32B	(0x6 << 6)
#define TDCR_BURSTSZ_64B	(0x7 << 6)
#define TDCR_BURSTSZ_SQU_1B	(0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B	(0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B	(0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B	(0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B	(0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
#define TDCR_BURSTSZ_128B	(0x5 << 6)
#define TDCR_DSTDIR_MSK		(0x3 << 4)	/* Dst Direction */
#define TDCR_DSTDIR_ADDR_HOLD	(0x2 << 4)	/* Dst Addr Hold */
#define TDCR_DSTDIR_ADDR_INC	(0x0 << 4)	/* Dst Addr Increment */
#define TDCR_SRCDIR_MSK		(0x3 << 2)	/* Src Direction */
#define TDCR_SRCDIR_ADDR_HOLD	(0x2 << 2)	/* Src Addr Hold */
#define TDCR_SRCDIR_ADDR_INC	(0x0 << 2)	/* Src Addr Increment */
#define TDCR_DSTDESCCONT	(0x1 << 1)
#define TDCR_SRCDESTCONT	(0x1 << 0)

/* Two-Channel DMA Int Mask Register */
#define TDIMR_COMP		(0x1 << 0)

/* Two-Channel DMA Int Status Register */
#define TDISR_COMP		(0x1 << 0)
/*
 * Two-Channel DMA Descriptor Struct
 * NOTE: desc's buf must be aligned to 16 bytes.
 */
struct mmp_tdma_desc {
	u32 byte_cnt;
	u32 src_addr;
	u32 dst_addr;
	u32 nxt_desc;
};

enum mmp_tdma_type {
	MMP_AUD_TDMA = 0,
	PXA910_SQU,
};

#define TDMA_MAX_XFER_BYTES	SZ_64K
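
/*
 * Per-channel driver state. The descriptor ring (desc_arr) is carved out
 * of the "asram" gen_pool passed in at channel init, so the controller
 * fetches descriptors from on-chip SRAM; desc_arr_phys is the ring's
 * physical base, which TDNDPR and the nxt_desc links refer to.
 */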
struct mmp_tdma_chan {
	struct device			*dev;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;

	struct mmp_tdma_desc		*desc_arr;
	dma_addr_t			desc_arr_phys;
	int				desc_num;
	enum dma_transfer_direction	dir;
	dma_addr_t			dev_addr;
	u32				burst_sz;
	enum dma_slave_buswidth		buswidth;
	enum dma_status			status;
	struct dma_slave_config		slave_config;

	int				idx;
	enum mmp_tdma_type		type;
	int				irq;
	void __iomem			*reg_base;

	size_t				buf_len;
	size_t				period_len;
	size_t				pos;

	struct gen_pool			*pool;
};
#define TDMA_CHANNEL_NUM 2
struct mmp_tdma_device {
	struct device			*dev;
	void __iomem			*base;
	struct dma_device		device;
	struct mmp_tdma_chan		*tdmac[TDMA_CHANNEL_NUM];
};
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg);
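
/*
 * Arm the channel with a descriptor chain: TDNDPR takes the physical
 * address of the first descriptor, and setting TDCR_FETCHND tells the
 * controller to fetch it.
 */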
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
	writel(phys, tdmac->reg_base + TDNDPR);
	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
					tdmac->reg_base + TDCR);
}
static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
{
	if (enable)
		writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
	else
		writel(0, tdmac->reg_base + TDIMR);
}
static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
	/* enable dma chan */
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}
static int mmp_tdma_disable_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	u32 tdcr;

	tdcr = readl(tdmac->reg_base + TDCR);
	tdcr |= TDCR_ABR;
	tdcr &= ~TDCR_CHANEN;
	writel(tdcr, tdmac->reg_base + TDCR);

	tdmac->status = DMA_COMPLETE;

	return 0;
}
static int mmp_tdma_resume_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;

	return 0;
}
static int mmp_tdma_pause_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;

	return 0;
}
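
/*
 * Translate the cached slave parameters into a TDCR value. The audio
 * TDMA and the PXA910 SQU encode burst size differently (compare the
 * TDCR_BURSTSZ_* and TDCR_BURSTSZ_SQU_* encodings above), hence the two
 * switch blocks. The channel is disabled first; presumably TDCR must
 * not be rewritten while the channel is active.
 */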
static int mmp_tdma_config_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	unsigned int tdcr = 0;

	mmp_tdma_disable_chan(chan);

	if (tdmac->dir == DMA_MEM_TO_DEV)
		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
	else if (tdmac->dir == DMA_DEV_TO_MEM)
		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;

	if (tdmac->type == MMP_AUD_TDMA) {
		tdcr |= TDCR_PACKMOD;

		switch (tdmac->burst_sz) {
		case 4:
			tdcr |= TDCR_BURSTSZ_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_32B;
			break;
		case 64:
			tdcr |= TDCR_BURSTSZ_64B;
			break;
		case 128:
			tdcr |= TDCR_BURSTSZ_128B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}

		switch (tdmac->buswidth) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			tdcr |= TDCR_SSZ_8_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			tdcr |= TDCR_SSZ_16_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			tdcr |= TDCR_SSZ_32_BITS;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
			return -EINVAL;
		}
	} else if (tdmac->type == PXA910_SQU) {
		tdcr |= TDCR_SSPMOD;

		switch (tdmac->burst_sz) {
		case 1:
			tdcr |= TDCR_BURSTSZ_SQU_1B;
			break;
		case 2:
			tdcr |= TDCR_BURSTSZ_SQU_2B;
			break;
		case 4:
			tdcr |= TDCR_BURSTSZ_SQU_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_SQU_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_SQU_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_SQU_32B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}
	}

	writel(tdcr, tdmac->reg_base + TDCR);
	return 0;
}
static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
{
	u32 reg = readl(tdmac->reg_base + TDISR);

	if (reg & TDISR_COMP) {
		/* clear irq */
		reg &= ~TDISR_COMP;
		writel(reg, tdmac->reg_base + TDISR);

		return 0;
	}
	return -EAGAIN;
}
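
/*
 * Residue bookkeeping: read back the current source (channel 0) or
 * destination (channel 1) address and subtract the ring's base address.
 * The idx-based split suggests a fixed convention of channel 0 doing
 * mem-to-dev and channel 1 dev-to-mem.
 */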
static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
{
	size_t reg;

	if (tdmac->idx == 0) {
		reg = __raw_readl(tdmac->reg_base + TDSAR);
		reg -= tdmac->desc_arr[0].src_addr;
	} else if (tdmac->idx == 1) {
		reg = __raw_readl(tdmac->reg_base + TDDAR);
		reg -= tdmac->desc_arr[0].dst_addr;
	} else
		return -EINVAL;

	return reg;
}
static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_tdma_chan *tdmac = dev_id;

	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
		tasklet_schedule(&tdmac->tasklet);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
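
/*
 * Fan-out handler for the shared-interrupt case (probe requests this one
 * when there are fewer IRQs than channels): poll both channels and claim
 * the interrupt if any of them had a completion pending.
 */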
static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
{
	struct mmp_tdma_device *tdev = dev_id;
	int i, ret;
	int irq_num = 0;

	for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
		struct mmp_tdma_chan *tdmac = tdev->tdmac[i];

		ret = mmp_tdma_chan_handler(irq, tdmac);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;

	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (gpool && tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;
}
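
/*
 * tx_submit here only arms the descriptor ring; this driver does not
 * assign cookies, so it returns 0 instead of calling dma_cookie_assign().
 */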
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	return 0;
}
static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, 0, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}
static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
}
static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = tdmac->pool;
	if (!gpool)
		return NULL;

	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);

	return tdmac->desc_arr;
}
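
/*
 * Build the cyclic descriptor ring: one descriptor per period, each
 * linked to the next through nxt_desc, with the last entry pointing back
 * at the first so the transfer loops until terminated.
 */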
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (tdmac->status != DMA_COMPLETE)
		return NULL;

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	mmp_tdma_config_write(chan, direction, &tdmac->slave_config);

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
					sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	/* enable interrupt */
	if (flags & DMA_PREP_INTERRUPT)
		mmp_tdma_enable_irq(tdmac, true);

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}
static int mmp_tdma_terminate_all(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_disable_chan(chan);
	/* disable interrupt */
	mmp_tdma_enable_irq(tdmac, false);

	return 0;
}
static int mmp_tdma_config(struct dma_chan *chan,
			   struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}
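
/*
 * device_config only caches the dma_slave_config; nothing is written to
 * the hardware until prep_dma_cyclic calls mmp_tdma_config_write() with
 * the actual transfer direction.
 */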
static int mmp_tdma_config_write(struct dma_chan *chan,
				 enum dma_transfer_direction dir,
				 struct dma_slave_config *dmaengine_cfg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (dir == DMA_DEV_TO_MEM) {
		tdmac->dev_addr = dmaengine_cfg->src_addr;
		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
		tdmac->buswidth = dmaengine_cfg->src_addr_width;
	} else {
		tdmac->dev_addr = dmaengine_cfg->dst_addr;
		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
	}
	tdmac->dir = dir;

	return mmp_tdma_config_chan(chan);
}
static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	tdmac->pos = mmp_tdma_get_pos(tdmac);
	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 tdmac->buf_len - tdmac->pos);

	return tdmac->status;
}
static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_enable_chan(tdmac);
}
static int mmp_tdma_remove(struct platform_device *pdev)
{
	return 0;
}
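
/*
 * Note the register layout implied by chan_init below: the per-register
 * offsets defined at the top are 0x10 apart, and the two channels'
 * copies of each register are interleaved at a 4-byte stride, hence
 * reg_base = tdev->base + idx * 4.
 */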
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
			      int idx, int irq,
			      int type, struct gen_pool *pool)
{
	struct mmp_tdma_chan *tdmac;

	if (idx >= TDMA_CHANNEL_NUM) {
		dev_err(tdev->dev, "too many channels for device!\n");
		return -EINVAL;
	}

	/* alloc channel */
	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
	if (!tdmac)
		return -ENOMEM;

	if (irq)
		tdmac->irq = irq;
	tdmac->dev = tdev->dev;
	tdmac->chan.device = &tdev->device;
	tdmac->idx = idx;
	tdmac->type = type;
	tdmac->reg_base = tdev->base + idx * 4;
	tdmac->pool = pool;
	tdmac->status = DMA_COMPLETE;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);

	/* add the channel to tdma_chan list */
	list_add_tail(&tdmac->chan.device_node,
			&tdev->device.channels);

	return 0;
}
struct mmp_tdma_filter_param {
	struct device_node *of_node;
	unsigned int chan_id;
};
static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mmp_tdma_filter_param *param = fn_param;
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct dma_device *pdma_device = tdmac->chan.device;

	if (pdma_device->dev->of_node != param->of_node)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	return true;
}
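
/*
 * OF translation callback: the binding uses a single specifier cell
 * holding the channel index, which is validated against the two
 * hardware channels before the filter runs.
 */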
static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
	dma_cap_mask_t mask = tdev->device.cap_mask;
	struct mmp_tdma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.of_node = ofdma->of_node;
	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= TDMA_CHANNEL_NUM)
		return NULL;

	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
}
static const struct of_device_id mmp_tdma_dt_ids[] = {
	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
	{}
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have a couple of channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, 0, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dmaenginem_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
							mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			return ret;
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
static const struct platform_device_id mmp_tdma_id_table[] = {
	{ "mmp-adma", MMP_AUD_TDMA },
	{ "pxa910-squ", PXA910_SQU },
	{ },
};
static struct platform_driver mmp_tdma_driver = {
	.driver		= {
		.name	= "mmp-tdma",
		.of_match_table = mmp_tdma_dt_ids,
	},
	.id_table	= mmp_tdma_id_table,
	.probe		= mmp_tdma_probe,
	.remove		= mmp_tdma_remove,
};

module_platform_driver(mmp_tdma_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");