/*
 * driver/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
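/*
 * Each physical channel occupies two bits in the even/odd channel status
 * registers (e.g. D40_DREG_ACTIVE for even channel numbers, D40_DREG_ACTIVO
 * for odd ones). For example, channel 5 lives in the "odd" register and uses
 * bits 5:4 there: D40_CHAN_POS(5) == 4 and D40_CHAN_POS_MASK(5) == 0x30.
 */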
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len == 1.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used for among other things for communication
 * with the DMA engine.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number. Both allocated_src and allocated_dst can not be
 * allocated to a physical channel, since the interrupt handler would then
 * have no way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
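/*
 * allocated_src/allocated_dst hold either D40_ALLOC_PHY, D40_ALLOC_FREE or a
 * mask of logical event lines: bit n set means that logical event line n is
 * claimed on this physical channel (see d40_alloc_mask_set()).
 */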
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			*dev;
	void __iomem			*virtbase;
	int				 rev;
	struct clk			*clk;
	phys_addr_t			 phy_start;
	resource_size_t			 phy_size;
	int				 irq;
	int				 num_phy_chans;
	int				 num_log_chans;
	struct dma_device		 dma_both;
	struct dma_device		 dma_slave;
	struct dma_device		 dma_memcpy;
	struct d40_chan			*phy_chans;
	struct d40_chan			*log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	*plat_data;
	/* Physical half channels */
	struct d40_phy_res		*phy_res;
	struct d40_lcla_pool		 lcla_pool;
	void				*lcpa_base;
	dma_addr_t			 phy_lcpa;
	resource_size_t			 lcpa_size;
	struct kmem_cache		*desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32  src;
	u32  clr;
	bool is_error;
	int  offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
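/*
 * Descriptors carry a small pre-allocated LLI area that covers the common
 * one-src/one-dst case; anything larger falls back to a kmalloc'd buffer,
 * see d40_pool_lli_alloc() below.
 */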
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);

		d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
		d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
	d40d->lli_phy.src_addr = 0;
	d40d->lli_phy.dst_addr = 0;
}
static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
				      struct d40_desc *desc)
{
	dma_cookie_t cookie = d40c->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	d40c->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (d40c->base->lcla_pool.num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= d40c->base->lcla_pool.num_blocks)
		goto err;

	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return -EINVAL;
}
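/*
 * Each physical channel owns a 1024-byte slice of the LCLA area. The bits in
 * lcla_pool.alloc_map[phy_chan->num] track which llis_per_log-sized blocks of
 * that slice are in use; a logical channel reserves one block index for src
 * and one for dst in d40_lcla_id_get() and keeps them until d40_term_all()
 * releases them.
 */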
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 status;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
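/*
 * All even-numbered physical channels share the ACTIVE command register and
 * all odd-numbered ones share ACTIVO, two bits per channel, so command writes
 * issued from different channels must be serialized with base->execmd_lock.
 */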
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);

		/* Return desc to free-list */
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val = 0;

	/* If SSLNK or SDLNK is zero all events are disabled */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			    d40c->phy_chan->num * D40_DREG_PCDELTA +
			    D40_CHAN_REG_SDLNK);
	return val;
}
static void d40_config_enable_lidx(struct d40_chan *d40c)
{
	/* Set LIDX for lcla */
	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

	writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
	       D40_SREG_ELEM_LOG_LIDX_MASK,
	       d40c->base->virtbase + D40_DREG_PCBASE +
	       d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}
static int d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;
	int res;

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res)
		return res;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		d40_config_enable_lidx(d40c);
	}
	return res;
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s equals to zero, the job is not linked */
		if (s > 0) {
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	d40d->lli_count += d40d->lli_tx_len;
}
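/*
 * A descriptor may describe more LLIs than fit in one hardware transfer
 * (lli_len > lli_tx_len). d40_desc_load() programs the next lli_tx_len LLIs
 * and advances lli_count; the terminal-count interrupt then reloads the
 * remainder from dma_tc_handle() until lli_count reaches lli_len.
 */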
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	tx->cookie = d40_assign_cookie(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d_fin = d40_first_active_get(d40c);

	if (d40d_fin == NULL)
		goto err;

	d40c->completed = d40d_fin->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d_fin->txd.callback;
	callback_param = d40d_fin->txd.callback_param;

	if (async_tx_test_ack(&d40d_fin->txd)) {
		d40_pool_lli_free(d40d_fin);
		d40_desc_remove(d40d_fin);
		/* Return desc to free-list */
		d40_desc_free(d40c, d40d_fin);
	} else {
		if (!d40d_fin->is_in_client_list) {
			d40_desc_remove(d40d_fin);
			list_add_tail(&d40d_fin->node, &d40c->client);
			d40d_fin->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback)
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 tmp;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		tmp = readl(base->virtbase + il[row].clr);
		tmp |= 1 << idx;
		writel(tmp, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
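/*
 * Each row of il[] above covers 32 interrupt bits: the first four rows map
 * the logical-channel transfer-complete registers onto lookup_log_chans[],
 * the next four map the corresponding error registers, and the last two
 * (offset == D40_PHY_CHAN) map the physical-channel registers onto
 * lookup_phy_chans[].
 */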
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			/* Return desc to free-list */
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocation
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_config_set_event(d40c, false);
		/* Resume the other logical channels if any */
		if (d40_chan_has_events(d40c))
			res = d40_channel_execute_command(d40c,
							  D40_DMA_RUN);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}

	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
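/*
 * The residue is the number of elements still to be transferred (the ECNT
 * field of the destination element register) scaled by the element size,
 * i.e. 2^data_width bytes per element.
 */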
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		/*
		 * Check if there is space available in lcla. If not,
		 * split list into 1-length and run only in lcpa
		 * space.
		 */
		if (d40_lcla_id_get(d40c) != 0)
			d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					d40d->lli_phy.src_addr,
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize,
					true);
		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					d40d->lli_phy.dst_addr,
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize,
					true);
		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
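/*
 * Clients typically pass this filter function to dma_request_channel()
 * together with a channel configuration, for example:
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = { ... };
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */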
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy) {
		err = d40_config_write(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure channel\n",
				__func__);
		}
	}
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot free unallocated channel\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
							dma_addr_t dst,
							dma_addr_t src,
							size_t size,
							unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
		d40d->lli_tx_len = d40d->lli_len;
	else
		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

	/*
	 * Check if there is space available in lcla.
	 * If not, split list into 1-length and run only
	 * in lcpa space.
	 */
	if (d40_lcla_id_get(d40c) != 0)
		d40d->lli_tx_len = 1;

	if (direction == DMA_FROM_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(&d40c->lcla,
				       sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dma_flags & DMA_PREP_INTERRUPT,
				       dev_addr, d40d->lli_tx_len,
				       d40c->base->plat_data->llis_per_log);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = sgl_len;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		if (d40c->runtime_addr)
			src_dev_addr = d40c->runtime_addr;
		else
			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		src_dev_addr = 0;
		if (d40c->runtime_addr)
			dst_dev_addr = d40c->runtime_addr;
		else
			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				d40d->lli_phy.src_addr,
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize,
				true);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				d40d->lli_phy.dst_addr,
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize,
				true);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_data_direction direction,
							  unsigned long dma_flags)
{
	int err;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	if (d40d == NULL)
		return NULL;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (config_maxburst >= 16)
		psize = STEDMA40_PSIZE_LOG_16;
	else if (config_maxburst >= 8)
		psize = STEDMA40_PSIZE_LOG_8;
	else if (config_maxburst >= 4)
		psize = STEDMA40_PSIZE_LOG_4;
	else
		psize = STEDMA40_PSIZE_LOG_1;

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
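/*
 * The maxburst value from dma_slave_config is mapped onto the closest
 * supported logical burst (packet) size: >= 16 gives PSIZE_LOG_16, >= 8
 * gives PSIZE_LOG_8, >= 4 gives PSIZE_LOG_4, otherwise PSIZE_LOG_1. The
 * same width and psize are applied to both the src and dst endpoints.
 */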
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access address at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
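/*
 * Three dma_device instances are registered: dma_slave exposes the logical
 * channels for slave transfers only, dma_memcpy exposes a set of logical
 * channels for memcpy only, and dma_both exposes the physical channels,
 * which can do both slave and memcpy transfers.
 */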
2361	/* Initialization functions. */
2363	static int __init d40_phy_res_init(struct d40_base *base)
2366		int num_phy_chans_avail = 0;
2368		int odd_even_bit = -2;
2370		val[0] = readl(base->virtbase + D40_DREG_PRSME);
2371		val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2373		for (i = 0; i < base->num_phy_chans; i++) {
2374			base->phy_res[i].num = i;
2375			odd_even_bit += 2 * ((i % 2) == 0);
2376			if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2377				/* Mark security only channels as occupied */
2378				base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2379				base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2381				base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2382				base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2383				num_phy_chans_avail++;
2385			spin_lock_init(&base->phy_res[i].lock);
2388		/* Mark disabled channels as occupied */
2389		for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2390			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2391			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2392			num_phy_chans_avail--;
2395		dev_info(base->dev, "%d of %d physical DMA channels available\n",
2396			 num_phy_chans_avail, base->num_phy_chans);
2398		/* Verify settings extended vs standard */
2399		val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2401		for (i = 0; i < base->num_phy_chans; i++) {
2403			if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2404			    (val[0] & 0x3) != 1)
2406					"[%s] INFO: channel %d is misconfigured (%d)\n",
2407					__func__, i, val[0] & 0x3);
2409			val[0] = val[0] >> 2;
2412		return num_phy_chans_avail;
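/*
 * Editor's note (not part of ste_dma40.c): PRSME/PRSMO (like the other
 * "E"/"O" register pairs used further down) pack one 2-bit field per
 * physical channel, with even-numbered channels in the "E" register and odd
 * ones in the "O" register. The loop above walks that layout incrementally
 * via odd_even_bit; an equivalent direct lookup, written as a hypothetical
 * helper, would be (value 1 meaning the channel is reserved for the secure
 * side):
 */
static u32 example_phy_chan_mode(u32 val_even, u32 val_odd, int chan)
{
	u32 reg = (chan & 1) ? val_odd : val_even;

	/* Two bits per channel, channels chan and chan+1 share a bit pair */
	return (reg >> (2 * (chan / 2))) & 0x3;
}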
2415	static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2417		static const struct d40_reg_val dma_id_regs[] = {
2419			{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2420			{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2422			 * D40_DREG_PERIPHID2 Depends on HW revision:
2423			 * MOP500/HREF ED has 0x0008,
2425			 * HREF V1 has 0x0028
2427			{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2430			{ .reg = D40_DREG_CELLID0, .val = 0x000d},
2431			{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
2432			{ .reg = D40_DREG_CELLID2, .val = 0x0005},
2433			{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
2435		struct stedma40_platform_data *plat_data;
2436		struct clk *clk = NULL;
2437		void __iomem *virtbase = NULL;
2438		struct resource *res = NULL;
2439		struct d40_base *base = NULL;
2440		int num_log_chans = 0;
2445		clk = clk_get(&pdev->dev, NULL);
2448			dev_err(&pdev->dev, "[%s] No matching clock found\n",
2455		/* Get IO for DMAC base address */
2456		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2460		if (request_mem_region(res->start, resource_size(res),
2461				       D40_NAME " I/O base") == NULL)
2464		virtbase = ioremap(res->start, resource_size(res));
2468		/* HW version check */
2469		for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2470			if (dma_id_regs[i].val !=
2471			    readl(virtbase + dma_id_regs[i].reg)) {
2473				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2477				readl(virtbase + dma_id_regs[i].reg));
2482		/* Get silicon revision */
2483		val = readl(virtbase + D40_DREG_PERIPHID2);
2485		if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
2487			"[%s] Unknown designer! Got %x wanted %x\n",
2488			__func__, val & 0xf, D40_PERIPHID2_DESIGNER);
2492		/* The number of physical channels on this HW */
2493		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2495		dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2496			 (val >> 4) & 0xf, res->start);
2498		plat_data = pdev->dev.platform_data;
2500		/* Count the number of logical channels in use */
2501		for (i = 0; i < plat_data->dev_len; i++)
2502			if (plat_data->dev_rx[i] != 0)
2505		for (i = 0; i < plat_data->dev_len; i++)
2506			if (plat_data->dev_tx[i] != 0)
2509		base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2510		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2511		       sizeof(struct d40_chan), GFP_KERNEL);
2514			dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2518		base->rev = (val >> 4) & 0xf;
2520		base->num_phy_chans = num_phy_chans;
2521		base->num_log_chans = num_log_chans;
2522		base->phy_start = res->start;
2523		base->phy_size = resource_size(res);
2524		base->virtbase = virtbase;
2525		base->plat_data = plat_data;
2526		base->dev = &pdev->dev;
2527		base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2528		base->log_chans = &base->phy_chans[num_phy_chans];
2530		base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2535		base->lookup_phy_chans = kzalloc(num_phy_chans *
2536						 sizeof(struct d40_chan *),
2538		if (!base->lookup_phy_chans)
2541		if (num_log_chans + plat_data->memcpy_len) {
2543			 * The max number of logical channels is the number of
2544			 * event lines for all src devices and dst devices.
2546			base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2547							 sizeof(struct d40_chan *),
2549			if (!base->lookup_log_chans)
2552		base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2554		if (!base->lcla_pool.alloc_map)
2557		base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2558						    0, SLAB_HWCACHE_ALIGN,
2560		if (base->desc_slab == NULL)
2573		release_mem_region(res->start,
2574				   resource_size(res));
2579		kfree(base->lcla_pool.alloc_map);
2580		kfree(base->lookup_log_chans);
2581		kfree(base->lookup_phy_chans);
2582		kfree(base->phy_res);
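/*
 * Editor's note (not part of ste_dma40.c): d40_hw_detect_init() above makes
 * a single kzalloc() cover struct d40_base plus the per-channel array, then
 * points phy_chans just past the (4-byte aligned) base structure and
 * log_chans past the physical channels. A stand-alone illustration of that
 * layout trick, with hypothetical names, looks like this:
 */
struct example_chan {
	int num;
};

struct example_base {
	int num_phy;
	int num_log;
	struct example_chan *phy_chans;	/* first num_phy entries of the tail */
	struct example_chan *log_chans;	/* entries following the physical ones */
};

static struct example_base *example_alloc(int num_phy, int num_log)
{
	struct example_base *b;

	/* One allocation: header + all channel descriptors in its tail */
	b = kzalloc(ALIGN(sizeof(*b), 4) +
		    (num_phy + num_log) * sizeof(struct example_chan),
		    GFP_KERNEL);
	if (!b)
		return NULL;

	b->num_phy = num_phy;
	b->num_log = num_log;
	b->phy_chans = (void *)b + ALIGN(sizeof(*b), 4);
	b->log_chans = &b->phy_chans[num_phy];

	return b;
}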
2589	static void __init d40_hw_init(struct d40_base *base)
2592		static const struct d40_reg_val dma_init_reg[] = {
2593			/* Clock every part of the DMA block from start */
2594			{ .reg = D40_DREG_GCC, .val = 0x0000ff01},
2596			/* Interrupts on all logical channels */
2597			{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2598			{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2599			{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2600			{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2601			{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2602			{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2603			{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2604			{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2605			{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2606			{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2607			{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2608			{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2611		u32 prmseo[2] = {0, 0};
2612		u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2616		for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2617			writel(dma_init_reg[i].val,
2618			       base->virtbase + dma_init_reg[i].reg);
2620		/* Configure all our dma channels to default settings */
2621		for (i = 0; i < base->num_phy_chans; i++) {
2623			activeo[i % 2] = activeo[i % 2] << 2;
2625			if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2627				activeo[i % 2] |= 3;
2631			/* Enable interrupt # */
2632			pcmis = (pcmis << 1) | 1;
2634			/* Clear interrupt # */
2635			pcicr = (pcicr << 1) | 1;
2637			/* Set channel to physical mode */
2638			prmseo[i % 2] = prmseo[i % 2] << 2;
2643		writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2644		writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2645		writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2646		writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2648		/* Write which interrupt to enable */
2649		writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2651		/* Write which interrupt to clear */
2652		writel(pcicr, base->virtbase + D40_DREG_PCICR);
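/*
 * Editor's note (not part of ste_dma40.c): inside the channel loop above,
 * pcmis and pcicr are each shifted left once per physical channel with the
 * new low bit set, so after the loop they hold a mask whose num_phy_chans
 * least significant bits are all 1. The hypothetical helper below computes
 * the same mask in closed form; the iterative form in the driver also copes
 * with n == 32, where a single (1U << 32) shift would be undefined.
 */
static u32 example_low_bit_mask(int n)
{
	return (n >= 32) ? 0xFFFFFFFFu : ((1U << n) - 1);
}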
2656	static int __init d40_lcla_allocate(struct d40_base *base)
2658		unsigned long *page_list;
2663		 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
2664		 * To fulfill this hardware requirement without wasting 256 kb,
2665		 * we allocate pages until we get an aligned one.
2667		page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2675		/* Calculate how many pages are required */
2676		base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2678		for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2679			page_list[i] = __get_free_pages(GFP_KERNEL,
2680							base->lcla_pool.pages);
2681			if (!page_list[i]) {
2684					"[%s] Failed to allocate %d pages.\n",
2685					__func__, base->lcla_pool.pages);
2687				for (j = 0; j < i; j++)
2688					free_pages(page_list[j], base->lcla_pool.pages);
2692			if ((virt_to_phys((void *)page_list[i]) &
2693			    (LCLA_ALIGNMENT - 1)) == 0)
2697		for (j = 0; j < i; j++)
2698			free_pages(page_list[j], base->lcla_pool.pages);
2700		if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2701			base->lcla_pool.base = (void *)page_list[i];
2703			/* After many attempts with no success finding the correct
2704			 * alignment, try allocating one big buffer instead. */
2706 "[%s] Failed to get %d pages @ 18 bit align.\n",
2707 __func__
, base
->lcla_pool
.pages
);
2708 base
->lcla_pool
.base_unaligned
= kmalloc(SZ_1K
*
2709 base
->num_phy_chans
+
2712 if (!base
->lcla_pool
.base_unaligned
) {
2717 base
->lcla_pool
.base
= PTR_ALIGN(base
->lcla_pool
.base_unaligned
,
2721 writel(virt_to_phys(base
->lcla_pool
.base
),
2722 base
->virtbase
+ D40_DREG_LCLA
);
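/*
 * Editor's note (not part of ste_dma40.c): d40_lcla_allocate() above tries a
 * bounded number of page allocations, keeping the first one whose physical
 * address is LCLA_ALIGNMENT (256 KiB) aligned, and only then falls back to a
 * kmalloc() of size + alignment that is aligned with pointer arithmetic. The
 * hypothetical helper below shows just that fallback step in isolation:
 */
static void *example_alloc_aligned_fallback(size_t size, size_t align,
					    void **raw_out)
{
	/* Over-allocate so an aligned block of 'size' bytes must fit inside */
	void *raw = kmalloc(size + align, GFP_KERNEL);

	if (!raw)
		return NULL;

	*raw_out = raw;		/* caller kfree()s this original pointer */
	return PTR_ALIGN(raw, align);
}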
2728	static int __init d40_probe(struct platform_device *pdev)
2732		struct d40_base *base;
2733		struct resource *res = NULL;
2734		int num_reserved_chans;
2737		base = d40_hw_detect_init(pdev);
2742		num_reserved_chans = d40_phy_res_init(base);
2744		platform_set_drvdata(pdev, base);
2746		spin_lock_init(&base->interrupt_lock);
2747		spin_lock_init(&base->execmd_lock);
2749		/* Get IO for logical channel parameter address */
2750		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2754				"[%s] No \"lcpa\" memory resource\n",
2758		base->lcpa_size = resource_size(res);
2759		base->phy_lcpa = res->start;
2761		if (request_mem_region(res->start, resource_size(res),
2762				       D40_NAME " I/O lcpa") == NULL) {
2765				"[%s] Failed to request LCPA region 0x%x-0x%x\n",
2766				__func__, res->start, res->end);
2770		/* We make use of ESRAM memory for this. */
2771		val = readl(base->virtbase + D40_DREG_LCPA);
2772		if (res->start != val && val != 0) {
2773			dev_warn(&pdev->dev,
2774				 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2775				 __func__, val, res->start);
2777		writel(res->start, base->virtbase + D40_DREG_LCPA);
2779		base->lcpa_base = ioremap(res->start, resource_size(res));
2780		if (!base->lcpa_base) {
2783				"[%s] Failed to ioremap LCPA region\n",
2788		ret = d40_lcla_allocate(base);
2790			dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
2795		spin_lock_init(&base->lcla_pool.lock);
2797		base->lcla_pool.num_blocks = base->num_phy_chans;
2799		base->irq = platform_get_irq(pdev, 0);
2801		ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2804			dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2808		err = d40_dmaengine_init(base, num_reserved_chans);
2814		dev_info(base->dev, "initialized\n");
2819		if (base->desc_slab)
2820			kmem_cache_destroy(base->desc_slab);
2822		iounmap(base->virtbase);
2823		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2824			free_pages((unsigned long)base->lcla_pool.base,
2825				   base->lcla_pool.pages);
2826		if (base->lcla_pool.base_unaligned)
2827			kfree(base->lcla_pool.base_unaligned);
2829		release_mem_region(base->phy_lcpa,
2831		if (base->phy_start)
2832			release_mem_region(base->phy_start,
2835		clk_disable(base->clk);
2839		kfree(base->lcla_pool.alloc_map);
2840		kfree(base->lookup_log_chans);
2841		kfree(base->lookup_phy_chans);
2842		kfree(base->phy_res);
2846		dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2850	static struct platform_driver d40_driver = {
2852			.owner = THIS_MODULE,
2857	int __init stedma40_init(void)
2859		return platform_driver_probe(&d40_driver, d40_probe);
2861	arch_initcall(stedma40_init);
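/*
 * Editor's note (not part of ste_dma40.c): platform_driver_probe() above only
 * binds if board code has already registered a matching platform device. From
 * the probe path shown here, that device must provide two named MEM resources,
 * "base" and "lcpa", one IRQ, and a struct stedma40_platform_data as
 * platform_data. The sketch below is purely illustrative board-side code: the
 * example_* names are hypothetical and every address, size and IRQ number is a
 * made-up placeholder, not a real U8500 value.
 */
static struct resource example_dma40_resources[] = {
	[0] = {
		.start	= 0x10000000,		/* placeholder DMAC base */
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,
		.name	= "base",
	},
	[1] = {
		.start	= 0x20000000,		/* placeholder LCPA in ESRAM */
		.end	= 0x20001fff,
		.flags	= IORESOURCE_MEM,
		.name	= "lcpa",
	},
	[2] = {
		.start	= 25,			/* placeholder IRQ number */
		.end	= 25,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_dma40_device = {
	.name		= "dma40",	/* assumed to match d40_driver's name (D40_NAME) */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_dma40_resources),
	.resource	= example_dma40_resources,
	/* .dev.platform_data would point at a struct stedma40_platform_data */
};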