/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90	/* 8 bits */
#define SH_QSECR	0x94	/* 8 bits */
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
#define EDMA_DCHMAP	0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

/* CCSTAT register */
#define EDMA_CCSTAT_ACTV	BIT(4)
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/*
 * 64bit array registers are split into two 32bit registers:
 * reg0: channel/event 0-31
 * reg1: channel/event 32-63
 *
 * bit 5 in the channel number tells the array index (0/1)
 * bit 0-4 (0x1f) is the bit offset within the register
 */
#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
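/*
 * For example, channel 40: EDMA_REG_ARRAY_INDEX(40) = 40 >> 5 = 1 and
 * EDMA_CHANNEL_BIT(40) = BIT(40 & 0x1f) = BIT(8), i.e. channel 40 is
 * controlled through bit 8 of the second (reg1) register of a pair.
 */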
/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	bool				polled;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The amount of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
struct edma_tc {
	struct device_node		*node;
	u16				id;
};
struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	struct edma_tc			*tc;
	int				ch_num;
	bool				alloced;
	bool				hw_triggered;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};
struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;
	bool				legacy_mode;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	unsigned int			ccint;
	unsigned int			ccerrint;

	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
	 * in use by Linux or if it is allocated to be used by DSP.
	 */
	unsigned long *slot_inuse;

	/*
	 * For tracking reserved channels used by DSP.
	 * If the bit is cleared, the channel is allocated to be used by DSP
	 * and Linux must not touch it.
	 */
	unsigned long *channels_mask;

	struct dma_device		dma_slave;
	struct dma_device		*dma_memcpy;
	struct edma_chan		*slave_chans;
	struct edma_tc			*tc_list;
	int				dummy_slot;
};
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
#define EDMA_BINDING_LEGACY	0
#define EDMA_BINDING_TPCC	1
static const u32 edma_binding_type[] = {
	[EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
	[EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
};
static const struct of_device_id edma_of_ids[] = {
	{
		.compatible = "ti,edma3",
		.data = &edma_binding_type[EDMA_BINDING_LEGACY],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &edma_binding_type[EDMA_BINDING_TPCC],
	},
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_ids);
static const struct of_device_id edma_tptc_of_ids[] = {
	{ .compatible = "ti,edma3-tptc", },
	{}
};
MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}
static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}
static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}
static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}
static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}
static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}
static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}
static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}
static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}
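/*
 * Example: for the EDMA_DRAE region ("4 x 64 bits" above), i selects the
 * shadow region and j the low/high 32-bit half, so
 * edma_or_array2(ecc, EDMA_DRAE, 1, 0, ...) touches
 * EDMA_DRAE + ((1 * 2 + 0) << 2) = 0x340 + 8 = 0x348.
 */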
static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
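/*
 * Queue priorities sit in 4-bit lanes of QUEPRI; e.g. queue_no = 2 gives
 * bit = 8, so its 3-bit priority value lands in bits 10:8.
 */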
static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}
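/*
 * The (slot << 5) mirrors PARM_OFFSET(): each PaRAM slot is PARM_SIZE
 * (0x20) bytes, so DCHMAP takes the byte offset of the slot's entry in
 * parameter RAM, i.e. the slot number starts at bit 5 (the PAENTRY field,
 * per the eDMA3 TRM).
 */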
static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
	}
}
/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
			  struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return -EINVAL;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);

	return 0;
}
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot >= 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}
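/*
 * Worked example: linking to slot 64 writes PARM_OFFSET(64) = 0x4000 +
 * (64 << 5) = 0x4800 into the low 16 LINK bits of the source slot's
 * LINK_BCNTRLD word, while the 0xffff0000 mask preserves its BCNTRLD half.
 */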
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}
/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (!echan->hw_triggered) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ESR, idx));
		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ER, idx));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_EER, idx));
	}
}
static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);

	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_EER, idx));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EECR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EESR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}
static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);

	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_ESR, idx));
}
static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
		edma_read_array(ecc, EDMA_EMR, idx));
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}
static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (!test_bit(echan->ch_num, ecc->channels_mask)) {
		dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
			echan->ch_num);
		return -EINVAL;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
		       EDMA_CHANNEL_BIT(channel));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}
static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}
static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}
static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up a
	 * link to the dummy slot; this results in all future events being
	 * absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (!echan->tc && echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);

		vchan_terminate_vdesc(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static void edma_synchronize(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	vchan_synchronize(&echan->vchan);
}
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}
static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device data bus
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the best way adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of the function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = src_addr;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
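/*
 * Worked example for the A-sync arithmetic above (illustrative numbers):
 * dma_length = 200000, acnt = 1 -> ccnt = 200000 / 65535 = 3 and
 * bcnt = 200000 - 3 * 65535 = 3395. bcnt is non-zero, so ccnt becomes 4:
 * the first frame moves 3395 bytes, then BCNT reloads to 0xffff and the
 * remaining three frames move 65535 bytes each (3395 + 3 * 65535 = 200000).
 */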
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		if (i == sg_len - 1)
			/* Enable completion interrupt */
			edesc->pset[i].param.opt |= TCINTEN;
		else if (!((i + 1) % MAX_NR_SG))
			/*
			 * Enable early completion interrupt for the
			 * intermediate set. In this case the driver will be
			 * notified when the paRAM set is submitted to TC. This
			 * will allow more time to set up the next set of slots.
			 */
			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
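/*
 * Note on batching: an SG list longer than MAX_NR_SG is executed in
 * chunks by edma_execute(); e.g. 50 entries run as 20 + 20 + 10, and
 * every MAX_NR_SG-th set carries TCINTEN | TCCMODE so the next chunk
 * can be programmed while the current one drains.
 */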
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len, array_size;

	if (unlikely(!echan || !len))
		return NULL;

	/* Align the array size (acnt block) with the transfer properties */
	switch (__ffs((src | dest | len))) {
	case 0: /* 8bit aligned */
		array_size = SZ_32K - 1;
		break;
	case 1: /* 16bit aligned */
		array_size = SZ_32K - 2;
		break;
	default: /* 32bit aligned */
		array_size = SZ_32K - 4;
		break;
	}

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with maximum of
		 * two paRAM slots.
		 * slot1: (full_length / 32767) times 32767 bytes bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767 one slot can be
		 * used to complete the transfer.
		 */
		width = array_size;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % array_size;

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[1].param.opt |= TCINTEN;
	}

	if (!(tx_flags & DMA_PREP_INTERRUPT))
		edesc->polled = true;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
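/*
 * Worked example (illustrative): len = 100000 with 32-bit aligned
 * src/dest/len gives array_size = SZ_32K - 4 = 32764. Slot 0 then moves
 * rounddown(100000, 32764) = 98292 bytes (3 arrays of 32764) and chains
 * to slot 1, which moves the remaining 100000 % 32764 = 1708 bytes.
 */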
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	bool use_intermediate = false;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG) {
		/*
		 * If the burst and period sizes are the same, we can put
		 * the full buffer into a single period and activate
		 * intermediate interrupts. This will produce interrupts
		 * after each burst, which is also after each desired period.
		 */
		if (burst == period_len) {
			period_len = buf_len;
			nslots = 2;
			use_intermediate = true;
		} else {
			return NULL;
		}
	}

	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT) {
			edesc->pset[i].param.opt |= TCINTEN;

			/* Also enable intermediate interrupts if necessary */
			if (use_intermediate)
				edesc->pset[i].param.opt |= ITCINTEN;
		}
	}

	/* Place the cyclic channel to highest priority queue */
	if (!echan->tc)
		edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
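/*
 * Example: buf_len = 48000 and period_len = 12000 gives nslots = 5; the
 * first four slots carry one period each and the fifth is a copy of
 * pset[0] that edma_execute() links back into the ring, so the transfer
 * loops until it is terminated.
 */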
static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;

	spin_lock(&echan->vchan.lock);
	edesc = echan->edesc;
	if (edesc) {
		if (edesc->cyclic) {
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
			return;
		} else if (edesc->processed == edesc->pset_nr) {
			edesc->residue = 0;
			edma_stop(echan);
			vchan_cookie_complete(&edesc->vdesc);
			echan->edesc = NULL;

			dev_dbg(dev, "Transfer completed on channel %d\n",
				echan->ch_num);
		} else {
			dev_dbg(dev, "Sub transfer completed on channel %d\n",
				echan->ch_num);

			edma_pause(echan);

			/* Update statistics for tx_status */
			edesc->residue -= edesc->sg_len;
			edesc->residue_stat = edesc->residue;
			edesc->processed_stat = edesc->processed;
		}
		edma_execute(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;
	int err;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	err = edma_read_slot(ecc, echan->slot[0], &p);

	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 * edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 * call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}
/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc)) {
		/*
		 * The registers indicate no pending error event but the irq
		 * handler has been called.
		 * Ask eDMA to re-evaluate the error registers.
		 */
		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
			__func__);
		edma_write(ecc, EDMA_EEVAL, 1);
		return IRQ_NONE;
	}

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = echan->slot[0];
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	return 0;

err_slot:
	edma_free_channel(echan);
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
/*
 * This limit exists to avoid a possible infinite loop when waiting for proof
 * that a particular transfer is completed. This limit can be hit if there
 * are large bursts to/from slow devices or the CPU is never able to catch
 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
 * RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos, pos_old;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);
	int event_reg;
	int i;

	/*
	 * We always read the dst/src position from the first RamPar
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We will busy wait until
	 * any one of the following situations occurs:
	 *   1. an event is pending for the channel
	 *   2. the position got updated
	 *   3. we hit the loop limit
	 */
	if (is_slave_direction(edesc->direction))
		event_reg = SH_ER;
	else
		event_reg = SH_ESR;

	pos_old = pos;
	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
		if (pos != pos_old)
			break;

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
					    "%s: timeout waiting for PaRAM update\n",
					    __func__);
			break;
		}

		cpu_relax();
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * If the position is 0, then EDMA loaded the closing dummy slot, the
	 * transfer is completed
	 */
	if (!pos)
		return 0;
	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
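/*
 * Example of the SG walk above: with three processed psets of 4096 bytes
 * and pos falling 1000 bytes into the second one, the loop retires
 * pset 0 (residue_stat -= 4096) and returns residue_stat - 1000.
 */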
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct dma_tx_state txstate_tmp;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)
		return ret;

	/* Provide a dummy dma_tx_state for completion checking */
	if (!txstate)
		txstate = &txstate_tmp;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		txstate->residue = edma_residue(echan->edesc);
	} else {
		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
							      cookie);
		if (vdesc)
			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
		else
			txstate->residue = 0;
	}

	/*
	 * Mark the cookie completed if the residue is 0 for non cyclic
	 * transfers
	 */
	if (ret != DMA_COMPLETE && !txstate->residue &&
	    echan->edesc && echan->edesc->polled &&
	    echan->edesc->vdesc.tx.cookie == cookie) {
		edma_stop(echan);
		vchan_cookie_complete(&echan->edesc->vdesc);
		echan->edesc = NULL;
		edma_execute(echan);
		ret = DMA_COMPLETE;
	}

	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
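/*
 * Example CCCFG decode: a controller with 64 DMA channels and 256 PaRAM
 * slots reports NUM_DMACH = 5 (BIT(5 + 1) = 64) and NUM_PAENTRY = 4
 * (BIT(4 + 4) = 256).
 */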
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
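/*
 * Example of the mux math above: an entry { 12, 9 } selects crossbar
 * register byte offset = 9 & ~3 = 8 and shift = (9 & 3) << 3 = 8, i.e.
 * event 12 is written into byte 1 of the 32-bit mux register at offset 8.
 */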
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	int sz, ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}
static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif
static bool edma_filter_fn(struct dma_chan *chan, void *param);
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	*info = pdev->dev.platform_data;
	s8			(*queue_priority_mapping)[2];
	const s16		(*reserved)[2];
	int			i, irq;
	char			*irq_name;
	struct resource		*mem;
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	struct edma_cc		*ecc;
	bool			legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		goto err_disable_pm;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);

	ecc->channels_mask = devm_kcalloc(dev,
					  BITS_TO_LONGS(ecc->num_channels),
					  sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
		ret = -ENOMEM;
		goto err_disable_pm;
	}

	/* Mark all channels available initially */
	bitmap_fill(ecc->channels_mask, ecc->num_channels);

	ecc->default_queue = info->default_queue;

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		reserved = info->rsv->rsv_slots;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_set(ecc->slot_inuse, reserved[i][0],
					   reserved[i][1]);
		}

		/* Clear channels not usable for Linux */
		reserved = info->rsv->rsv_chans;
		if (reserved) {
			for (i = 0; reserved[i][0] != -1; i++)
				bitmap_clear(ecc->channels_mask, reserved[i][0],
					     reserved[i][1]);
		}
	}

	for (i = 0; i < ecc->num_slots; i++) {
		/* Reset only unused - not reserved - paRAM slots */
		if (!test_bit(i, ecc->slot_inuse))
			edma_write_slot(ecc, i, &dummy_paramset);
	}
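
	/*
	 * Interrupt lookup: the named resources used by legacy board files
	 * are tried first; on DT boots the driver falls back to the
	 * "interrupts" list, where index 0 is the transfer-completion
	 * interrupt and index 2 the error interrupt (index 1, the memory
	 * protection interrupt, is not used by this driver).
	 */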
	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			goto err_disable_pm;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		ret = ecc->dummy_slot;
		goto err_disable_pm;
	}

	queue_priority_mapping = info->queue_priority_mapping;
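
	/*
	 * For the TPCC binding the TC list and the event-queue priorities
	 * come from the "ti,tptcs" phandle list, each entry carrying one
	 * argument cell with that TC's priority, e.g. (illustrative values)
	 * "ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 0>;".
	 */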
	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		unsigned int array_max;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list) {
			ret = -ENOMEM;
			goto err_reg1;
		}

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}
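
		/*
		 * A set bit in the optional mask marks a channel as usable by
		 * Linux; a made-up example that leaves only the first 32
		 * channels available:
		 *	dma-channel-mask = <0xffffffff 0x00000000>;
		 */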
		/* See if we have optional dma-channel-mask array */
		array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
		ret = of_property_read_variable_u32_array(node,
							  "dma-channel-mask",
							  (u32 *)ecc->channels_mask,
							  1, array_max);
		if (ret > 0 && ret != array_max)
			dev_warn(dev, "dma-channel-mask is not complete.\n");
		else if (ret == -EOVERFLOW || ret == -ENODATA)
			dev_warn(dev,
				 "dma-channel-mask is out of range or empty\n");
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);

	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Do not touch reserved channels */
		if (!test_bit(i, ecc->channels_mask))
			continue;

		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
err_disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}
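
/*
 * The channels live in one devm-allocated array, so before the dma_device
 * goes away the virt-dma bookkeeping is torn down by hand: unlink each
 * vchan from the device's channel list and make sure its completion tasklet
 * has finished.
 */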
static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return 0;
}
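
/*
 * The CC may lose context across a system sleep transition, so resume
 * restores everything the driver programmed earlier: the dummy PaRAM set,
 * the event-queue priorities and, for every allocated channel, shadow
 * region 0 access, the interrupt enables and the channel-to-slot mapping.
 */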
#ifdef CONFIG_PM_SLEEP
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}

static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	/* Re-initialize the dummy slot to the dummy param set */
	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* Ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0,
				       EDMA_REG_ARRAY_INDEX(i),
				       EDMA_CHANNEL_BIT(i));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};
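
/*
 * Legacy (non-DT) clients select a channel through the dmaengine filter
 * interface. A minimal sketch, with a made-up event number:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int evt = 12;	(hypothetical event/channel number)
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &evt);
 */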
static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}

	return match;
}
static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");