/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"
/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32
struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: slave configuration
 *
 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 * will have slightly different scheme here: @active points to a head of
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
	struct dma_slave_config		slave_config;
};
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
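
/*
 * For illustration (an assumption about the typical flow, not code from the
 * driver): with a flattened chain [d0, d1, d2] in @edmac->active, where only
 * d0 carries a valid txd.cookie, successive calls rotate the list to
 * [d1, d2, d0] and then [d2, d0, d1], returning true while the new head has
 * no cookie. Once the list rotates back to [d0, d1, d2] the head's cookie is
 * set, so the function returns false and the chain is considered done.
 */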
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
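
/*
 * A note on the two-buffer scheme above (my reading of the code, stated as
 * an assumption rather than taken from the EP93xx documentation): the M2P
 * channel has two buffer register sets (BASE0/MAXCNT0 and BASE1/MAXCNT1),
 * so m2p_hw_submit() primes up to two descriptors via m2p_fill_desc(),
 * which ping-pongs @buffer between 0 and 1. The NFB ("next frame buffer")
 * interrupt is enabled only when a second descriptor exists, while the
 * STALL interrupt covers the case where the channel runs out of buffers.
 */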
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie   : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize     : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, basing
	 * the decision on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}
/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else
			last_done = true;
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
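
/*
 * Usage sketch from the client side (illustrative, with hypothetical
 * callback and context variables; not part of the driver): a prepared
 * descriptor reaches ep93xx_dma_tx_submit() through dmaengine_submit(); as
 * the code above shows, an idle channel starts immediately, otherwise the
 * descriptor waits on @queue until dma_async_issue_pending():
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_done_callback;	// hypothetical callback
 *	txd->callback_param = my_ctx;		// hypothetical context
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */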
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
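
/*
 * Worked example (an illustration, not taken from the original sources):
 * with DMA_MAX_CHAN_BYTES = 0xffff (65535), a 150000-byte memcpy is split
 * into three chained descriptors of 65535, 65535 and 18930 bytes. Only the
 * first descriptor carries the cookie; the rest hang off its tx_list and
 * are flattened into @active by ep93xx_dma_set_active() at submit time.
 */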
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we keep submitting @period_len sized buffers and
 * calling the callback once each period has elapsed. Transfer terminates
 * only when client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
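
/*
 * Usage sketch (illustrative, with hypothetical sizes and callback; not part
 * of the driver): an audio-style client could carve a 32 KiB ring buffer
 * into 4 KiB periods and get a callback once per period until it terminates
 * the channel:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 0x8000, 0x1000,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_elapsed;	// hypothetical callback
 *	txd->callback_param = my_ctx;		// hypothetical context
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */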
/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}
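
/*
 * Illustrative pairing (an assumption about intended dmaengine usage, not
 * code from this file): a client tearing a channel down would typically do
 *
 *	dmaengine_terminate_async(chan);
 *	...
 *	dmaengine_synchronize(chan);
 *
 * so that ep93xx_dma_terminate_all() below stops the hardware, and
 * ep93xx_dma_synchronize() then waits, via m2p_hw_synchronize() on M2P
 * channels, until the channel has actually left the ON state.
 */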
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
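
/*
 * Configuration sketch (illustrative, with a hypothetical FIFO address; not
 * part of the driver): an M2M slave client would describe its device FIFO
 * before preparing transfers. The values are cached by
 * ep93xx_dma_slave_config() and land in @runtime_addr and @runtime_ctrl
 * through the function above at prepare time:
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = ssp_fifo_phys;	// hypothetical FIFO bus address
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 *	ret = dmaengine_slave_config(chan, &cfg);
 */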
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");