/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
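
/*
 * Illustration (derived from the macros above and the even/odd register
 * usage further down): each physical channel owns a 2-bit field, and
 * channels 2n and 2n+1 use the same field position but live in the
 * "even" respectively "odd" register (e.g. D40_DREG_ACTIVE/ACTIVO).
 * For example, channel 5 maps to bits [5:4] of the odd register:
 *
 *	D40_CHAN_POS(5)      == 4
 *	D40_CHAN_POS_MASK(5) == (0x3 << 4)
 */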

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/*
 * The number of free d40_desc to keep in memory before starting
 * to free them again.
 */
#define D40_DESC_CACHE_SIZE 50

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of LLIs in lli_pool.
 * @lli_tcount: Number of LLIs processed in the transfer. When it equals
 * lli_len then this transfer job is done.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_tcount;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA.
 * @phy: Physical base address of LCLA.
 * @base_size: size of lcla.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 phy;
	resource_size_t	 base_size;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number. Both allocated_src and allocated_dst can not be
 * allocated to a physical channel, since the interrupt handler then has
 * no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
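
/*
 * A sketch of the encoding used by allocated_src/allocated_dst (as
 * described above): the whole field is either D40_ALLOC_FREE,
 * D40_ALLOC_PHY when the full physical channel is taken, or a set of
 * per-event-line bits for logical channels, e.g.
 *
 *	allocated_src == D40_ALLOC_PHY		physically allocated
 *	allocated_src == D40_ALLOC_FREE		completely free
 *	allocated_src & (1 << event_line)	event line in use (logical)
 */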

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @free: List of free descriptors, ready to be reused.
 * @free_len: Number of descriptors in the free list.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 free;
	int				 free_len;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static void d40_desc_reset(struct d40_desc *d40d)
{
	d40d->lli_tcount = 0;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc;
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				goto out;
			}
	}

	if (list_empty(&d40c->free)) {
		/* Alloc new desc because we're out of used ones */
		desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&d40c->free,
					struct d40_desc,
					node);
		list_del(&desc->node);
		d40c->free_len--;
	}
out:
	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->free_len < D40_DESC_CACHE_SIZE) {
		list_add_tail(&d40d->node, &d40c->free);
		d40c->free_len++;
	} else
		kfree(d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c,
			   struct d40_lcla_pool *pool)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		pool->base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (pool->num_blocks > 32)
		return -EINVAL;

	spin_lock(&pool->lock);

	for (i = 0; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= pool->num_blocks)
		goto err;

	for (; i < pool->num_blocks; i++) {
		if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
			pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock(&pool->lock);
	return 0;
err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}

static void d40_lcla_id_put(struct d40_chan *d40c,
			    struct d40_lcla_pool *pool,
			    int id)
{
	if (id < 0)
		return;

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_lock(&pool->lock);
	pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
	spin_unlock(&pool->lock);
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int status, i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.src_id);
	d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
			d40c->lcla.dst_id);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_tcount = d40d->lli_len;
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		u32 lli_len;
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;

		src += d40d->lli_tcount;
		dst += d40d->lli_tcount;

		if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
			lli_len = d40d->lli_len;
		else
			lli_len = d40c->base->plat_data->llis_per_log;
		d40d->lli_tcount += lli_len;
		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
				  d40c->lcla.dst,
				  dst, src,
				  d40c->base->plat_data->llis_per_log);
	}
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	int err;

	if (d40c->log_num != D40_PHY_CHAN) {
		err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (err)
			return err;
		d40_config_set_event(d40c, true);
	}

	err = d40_channel_execute_command(d40c, D40_DMA_RUN);

	return err;
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	int err;
	struct d40_desc *d40d;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	if (!d40c->phy_chan)
		return;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_tcount < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		d40_desc_reset(d40d_fin);
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0, is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_chan(struct d40_chan *d40c,
			   struct stedma40_chan_cfg *info)
{
	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * 32;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type * 32 + 16;
	}

	/* Write channel configuration to the DMA */
	return d40_config_write(d40c);
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event, dir;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
			__func__);
		return res;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		dir = D40_CHAN_REG_SDLNK;
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		dir = D40_CHAN_REG_SSLNK;
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/*
		 * Release logical channel, deactivate the event line during
		 * the time physical res is suspended.
		 */
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
		       D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       phy->num * D40_DREG_PCDELTA + dir);

		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocation
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else
		d40_alloc_mask_free(phy, is_src, 0);

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;
	int res;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res != 0)
		goto _exit;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;

	/* Resume the other logical channels if any */
	if (d40_chan_has_events(d40c))
		res = d40_channel_execute_command(d40c,
						  D40_DMA_RUN);

_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
		if (res)
			goto out;

		/* If bytes left to transfer or linked tx resume job */
		if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
			d40_config_set_event(d40c, true);
			res = d40_channel_execute_command(d40c, D40_DMA_RUN);
		}
	} else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int lli_max = d40c->base->plat_data->llis_per_log;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	memset(d40d, 0, sizeof(struct d40_desc));
	d40d->lli_len = sgl_len;

	d40d->txd.flags = flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (sgl_len > 1)
			/*
			 * Check if there is space available in lcla. If not,
			 * split list into 1-length and run only in lcpa
			 * space.
			 */
			if (d40_lcla_id_get(d40c,
					    &d40c->base->lcla_pool) != 0)
				lli_max = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src, sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst, sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 flags & DMA_PREP_INTERRUPT, lli_max,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src, sgl_len, 0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);
		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst, sgl_len, 0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);
		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flg);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err)
			goto err_alloc;
	}

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto err_alloc;
	}

	err = d40_config_chan(d40c, &d40c->dma_cfg);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to configure channel\n",
			__func__);
		goto err_config;
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;

err_config:
	(void) d40_free_dma(d40c);
err_alloc:
	spin_unlock_irqrestore(&d40c->lock, flags);
	dev_err(&d40c->chan.dev->device,
		"[%s] Channel allocation failed\n", __func__);
	return -EINVAL;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
							dma_addr_t dst,
							dma_addr_t src,
							size_t size,
							unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err = 0;

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	memset(d40d, 0, sizeof(struct d40_desc));

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src, size, 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 true, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst, size, 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src, size,
				       d40c->dma_cfg.src_info.psize,
				       0, d40c->src_def_cfg, true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst, size,
				       d40c->dma_cfg.dst_info.psize,
				       0, d40c->dst_def_cfg, true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flg);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flg);
	return NULL;
}

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;
	int lli_max = d40c->base->plat_data->llis_per_log;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	d40d->lli_tcount = 0;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
			lli_max = 1;

	if (direction == DMA_FROM_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else if (direction == DMA_TO_DEVICE) {
		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		total_size = d40_log_sg_to_dev(&d40c->lcla,
					       sgl, sg_len,
					       &d40d->lli_log,
					       &d40c->log_def,
					       d40c->dma_cfg.src_info.data_width,
					       d40c->dma_cfg.dst_info.data_width,
					       direction,
					       flags & DMA_PREP_INTERRUPT,
					       dev_addr, lli_max,
					       d40c->base->plat_data->llis_per_log);
	} else
		return -EINVAL;

	if (total_size < 0)
		return -EINVAL;

	return 0;
}

static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tcount = 0;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
		src_dev_addr = 0;
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl, sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl, sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_data_direction direction,
							  unsigned long flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flg;
	int err;

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flg);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flg);

	if (d40d == NULL)
		return NULL;

	memset(d40d, 0, sizeof(struct d40_desc));

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->free);
		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		d40c->free_len = 0;

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	i = readl(virtbase + D40_DREG_PERIPHID2);

	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
		goto failure;
	}

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 (i >> 4) & 0xf, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}
	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	return base;

failure:
	if (clk) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}
	/* Get IO for logical channel link address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcla\" resource defined\n",
			__func__);
		goto failure;
	}

	base->lcla_pool.base_size = resource_size(res);
	base->lcla_pool.phy = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcla") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCLA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	val = readl(base->virtbase + D40_DREG_LCLA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	base->lcla_pool.base = ioremap(res->start, resource_size(res));
	if (!base->lcla_pool.base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->virtbase)
			iounmap(base->virtbase);
		if (base->lcla_pool.phy)
			release_mem_region(base->lcla_pool.phy,
					   base->lcla_pool.base_size);
		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);