// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_dma.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
/* M2P registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
#define M2P_CONTROL_NFBINT BIT(1)
#define M2P_CONTROL_CH_ERROR_INT BIT(3)
#define M2P_CONTROL_ENABLE BIT(4)
#define M2P_CONTROL_ICE BIT(6)

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008
#define M2P_STATUS 0x000c

#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3

/* M2M registers */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)
#define M2M_CONTROL_DAH BIT(11)
#define M2M_CONTROL_SAH BIT(12)
#define M2M_CONTROL_PW_SHIFT 9
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)
#define M2M_CONTROL_PWSC_SHIFT 25

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6

#define M2M_STATUS 0x000c
#define M2M_STATUS_CTL_SHIFT 1
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT 4
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32

/*
 * M2P channels.
 *
 * Note that these values are also directly used for setting the PPALLOC
 * register.
 */
#define EP93XX_DMA_I2S1 0
#define EP93XX_DMA_I2S2 1
#define EP93XX_DMA_AAC1 2
#define EP93XX_DMA_AAC2 3
#define EP93XX_DMA_AAC3 4
#define EP93XX_DMA_I2S3 5
#define EP93XX_DMA_UART1 6
#define EP93XX_DMA_UART2 7
#define EP93XX_DMA_UART3 8
#define EP93XX_DMA_IRDA 9
/* M2M channels */
#define EP93XX_DMA_SSP 10
#define EP93XX_DMA_IDE 11
enum ep93xx_dma_type {
	M2P_DMA,
	M2M_DMA,
};

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};

struct ep93xx_dma_chan_cfg {
	u8 port;
	enum dma_transfer_direction dir;
};
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @dma_cfg: channel number, direction
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	struct ep93xx_dma_chan_cfg dma_cfg;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
	struct dma_slave_config slave_config;
};
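/*
 * For illustration (a sketch, not used by the code): a multi-part transfer
 * prepared by a prep callback is handed back to the client as one descriptor
 * whose @tx_list carries the remaining pieces. On submit,
 * ep93xx_dma_set_active() flattens that into @active as desc0 -> desc1 ->
 * desc2, and each hardware buffer completion simply rotates @active to find
 * the next piece to program.
 */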
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_synchronize)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

	size_t num_channels;
	struct ep93xx_dma_chan channels[] __counted_by(num_channels);
};

struct ep93xx_edma_data {
	u32 id;
	size_t num_channels;
};
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
{
	if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
		return true;

	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
}

/**
 * ep93xx_dma_chan_direction - returns direction the channel can be used
 * @chan: channel
 *
 * This function can be used in filter functions to find out whether the
 * channel supports given DMA direction. Only M2P channels have such
 * limitation, for M2M channels the direction is configurable.
 */
static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
	if (!ep93xx_dma_chan_is_m2p(chan))
		return DMA_TRANS_NONE;

	/* even channels are for TX, odd for RX */
	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}
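/*
 * For illustration (not used by the code): with the mapping above, an M2P
 * channel with chan_id 0 only serves DMA_MEM_TO_DEV (TX) and chan_id 1 only
 * DMA_DEV_TO_MEM (RX), so a bidirectional peripheral needs one M2P channel
 * per direction. M2M channels return DMA_TRANS_NONE here and take their
 * direction from the runtime configuration instead.
 */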
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
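/*
 * Example (illustration only): with @active = d0 -> d1 -> d2, a call to
 * ep93xx_dma_advance_active() rotates the list to d1 -> d2 -> d0 and returns
 * true because the chained descriptors have no cookie assigned. Once the
 * rotation brings d0 (the submitted descriptor carrying the cookie) back to
 * the head, the chain is finished and false is returned, unless the channel
 * runs in cyclic mode.
 */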
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
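/*
 * A short note on the submit path above (descriptive only): the M2P channel
 * has two register banks (MAXCNT0/BASE0 and MAXCNT1/BASE1). m2p_hw_submit()
 * programs the first pending descriptor into one bank and, if another
 * descriptor is already available, preloads the second bank as well, so the
 * hardware can switch buffers on an NFB interrupt without stalling between
 * transfers.
 */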
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, basing
	 * on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}
/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	u32 control = 0;

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (edmac->dma_cfg.port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
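/*
 * Summary of the decision above (descriptive only): a NFB/DONE interrupt with
 * more descriptors pending refills one buffer and reports NEXT_BUFFER; the
 * channel is torn down and DONE reported only once the last descriptor has
 * completed and both FSMs confirm the hardware is idle (Buffer FSM in
 * DMA_NO_BUF, Control FSM in DMA_STALL); anything else is left for a later
 * interrupt to finish.
 */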
/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{
	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);

	return cookie;
}
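/*
 * A minimal client-side sketch (illustration only, not part of this driver),
 * assuming a hypothetical client that already holds a channel in "chan":
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(txd);		(ends up in ep93xx_dma_tx_submit())
 *	dma_async_issue_pending(chan);		(flushes anything left in @queue)
 *
 * dmaengine_submit() starts the transfer immediately only when the channel is
 * idle; otherwise the descriptor waits in @queue until the tasklet calls
 * ep93xx_dma_advance_work().
 */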
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (edmac->dma_cfg.port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
			switch (edmac->dma_cfg.port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(edmac->dma_cfg.dir))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	ret = clk_prepare_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable_unprepare(edmac->clk);

	return ret;
}
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable_unprepare(edmac->clk);
	free_irq(edmac->irq, edmac);
}
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
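/*
 * A minimal usage sketch (illustration only), assuming a client that has
 * requested an M2M memcpy channel and DMA-mapped both buffers; "dst_phys",
 * "src_phys" and "len" are placeholders:
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Transfers longer than DMA_MAX_CHAN_BYTES are split into a chained list of
 * descriptors by the function above; the split is invisible to the client.
 */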
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}

		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
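/*
 * A minimal cyclic usage sketch (illustration only), assuming an audio-style
 * client with a DMA-mapped ring buffer split into equal periods; "buf_phys",
 * "buf_len", "period_len", "period_done" and "client" are placeholders:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = period_done;		(called after every period)
 *	txd->callback_param = client;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_sync(chan);		(the only way to stop the cycle)
 */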
/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = M2M_CONTROL_PW_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
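/*
 * In other words (descriptive only): the slave bus width chosen by the client
 * maps directly onto the M2M_CONTROL PW field (8/16/32-bit peripheral width),
 * and the peripheral FIFO address becomes @runtime_addr, which the prep
 * callbacks later plug into whichever side of the transfer faces the device.
 */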
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
{
	const struct ep93xx_edma_data *data;
	struct device *dev = &pdev->dev;
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	char dma_clk_name[5];
	int i;

	data = device_get_match_data(dev);
	if (!data)
		return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));

	edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
			    GFP_KERNEL);
	if (!edma)
		return ERR_PTR(-ENOMEM);

	edma->m2m = data->id;
	edma->num_channels = data->num_channels;
	dma_dev = &edma->dma_dev;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < edma->num_channels; i++) {
		struct ep93xx_dma_chan *edmac = &edma->channels[i];
		int len;

		edmac->chan.device = dma_dev;
		edmac->regs = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(edmac->regs))
			return ERR_CAST(edmac->regs);

		edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
		if (edmac->irq < 0)
			return ERR_PTR(edmac->irq);

		edmac->edma = edma;

		if (edma->m2m)
			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
		else
			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
		if (len >= sizeof(dma_clk_name))
			return ERR_PTR(-ENOBUFS);

		edmac->clk = devm_clk_get(dev, dma_clk_name);
		if (IS_ERR(edmac->clk)) {
			dev_err_probe(dev, PTR_ERR(edmac->clk),
				      "no %s clock found\n", dma_clk_name);
			return ERR_CAST(edmac->clk);
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	return edma;
}
static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_chan_cfg *cfg = filter_param;

	if (cfg->dir != ep93xx_dma_chan_direction(chan))
		return false;

	echan->dma_cfg = *cfg;
	return true;
}

static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	struct ep93xx_dma_chan_cfg dma_cfg;
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];

	if (port > EP93XX_DMA_IRDA)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	dma_cfg.port = port;
	dma_cfg.dir = direction;

	return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
}

static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_chan_cfg *cfg = filter_param;

	echan->dma_cfg = *cfg;

	return true;
}

static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	struct ep93xx_dma_chan_cfg dma_cfg;
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];

	if (!is_slave_direction(direction))
		return NULL;

	switch (port) {
	case EP93XX_DMA_SSP:
	case EP93XX_DMA_IDE:
		break;
	default:
		return NULL;
	}

	dma_cfg.port = port;
	dma_cfg.dir = direction;

	return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
}
static int ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	int ret;

	edma = ep93xx_dma_of_probe(pdev);
	if (IS_ERR(edma))
		return PTR_ERR(edma);

	dma_dev = &edma->dma_dev;

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		return ret;

	if (edma->m2m)
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
						 edma);
	else
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
						 edma);
	if (ret)
		goto err_dma_unregister;

	dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(dma_dev);

	return ret;
}
static const struct ep93xx_edma_data edma_m2p = {
	.id = M2P_DMA,
	.num_channels = 10,
};

static const struct ep93xx_edma_data edma_m2m = {
	.id = M2M_DMA,
	.num_channels = 2,
};

static const struct of_device_id ep93xx_dma_of_ids[] = {
	{ .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
	{ .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
		.of_match_table = ep93xx_dma_of_ids,
	},
	.id_table = ep93xx_dma_driver_ids,
	.probe = ep93xx_dma_probe,
};

module_platform_driver(ep93xx_dma_driver);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");