/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)	(2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan)	(0x3 << D40_CHAN_POS(chan))
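/*
 * Worked example (editor's note, not in the original source): each physical
 * channel owns a 2-bit field in the ACTIVE/ACTIVO style registers, and two
 * neighbouring channels map to the same field position but in different
 * registers. For channel 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4 and
 * D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30, i.e. the field occupies bits
 * [5:4] of the register that serves the odd-numbered channels.
 */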
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on getting pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
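/*
 * Editor's illustration (not in the original source): allocated_src and
 * allocated_dst in struct d40_phy_res hold either D40_ALLOC_FREE,
 * D40_ALLOC_PHY, or a bitmask of logical event lines. For example, a
 * physical channel whose src side carries logical event lines 3 and 5 has
 * allocated_src == (1 << 3) | (1 << 5); it returns to D40_ALLOC_FREE only
 * once the mask drops back to D40_ALLOC_LOG_FREE.
 */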
/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8
/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
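/*
 * Editor's note (not in the original source): 3 * sizeof(struct d40_phy_lli)
 * is enough for the common one-src-plus-one-dst case with room left over
 * for alignment padding; logical LLIs are smaller than physical ones, so
 * the same embedded buffer covers both channel types.
 */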
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}
static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}
static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}
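/*
 * Editor's illustration (not in the original source): every physical channel
 * has its own register block of D40_DREG_PCDELTA bytes starting at
 * D40_DREG_PCBASE, so e.g. the SSCFG register of physical channel 2 lives at
 *
 *	virtbase + D40_DREG_PCBASE + 2 * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG
 *
 * which is exactly what chan_base() plus the D40_CHAN_REG_* offsets used
 * throughout this file compute.
 */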
#define d40_err(dev, format, arg...) \
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
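/*
 * Editor's sketch of the sizing above (not in the original source): a
 * descriptor with lli_len == 1 uses the embedded pre_alloc_lli buffer,
 * while e.g. lli_len == 4 on a logical channel kmallocs
 * 4 * 2 * sizeof(struct d40_log_lli) bytes (a src and a dst chain) plus one
 * extra alignment unit so PTR_ALIGN() can round the base up safely.
 */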
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; hence the loop starts
	 * at 1, since index 0 can't be used - zero is the end-of-chain marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
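/*
 * Editor's illustration (not in the original source): alloc_map is laid out
 * as D40_LCLA_LINK_PER_EVENT_GRP (128) slots per physical channel, so
 * physical channel 3 owns indices 384..511. A returned link index covers
 * one src/dst pair, and index 0 is never handed out because the hardware
 * treats link position 0 as the end-of-chain marker.
 */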
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d40c, d);
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		curr_lcla = d40_lcla_alloc_one(chan, desc);
		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * link back to it.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * link is also the last.
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		dma_sync_single_range_for_device(chan->base->dev,
						 pool->dma_addr, lcla_offset,
						 2 * sizeof(struct d40_log_lli),
						 DMA_TO_DEVICE);

		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

	desc->lli_current = lli_current;
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
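/*
 * Editor's worked example (illustrative, not in the original source): with
 * a byte-wide source (data_width 0) and a word-wide destination (data_width
 * 2), min_w = 0 and max_w = 2, so seg_max is STEDMA40_MAX_SEG_SIZE rounded
 * up to a multiple of 4 and then, if that overshoots the hardware limit,
 * reduced by one 4-byte element. A buffer of N bytes then needs
 * ceil(N / seg_max) dma elements, which is what the division plus the
 * remainder check above computes.
 */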
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			d40_err(chan2dev(d40c),
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
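/*
 * Editor's illustration (not in the original source): the execute-command
 * register is split in two, ACTIVE for even-numbered and ACTIVO for
 * odd-numbered physical channels, with one 2-bit enum d40_command field per
 * channel. Issuing D40_DMA_RUN on channel 7 therefore read-modify-writes
 * bits [7:6] of ACTIVO, while the wmask keeps every other channel's field
 * untouched.
 */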
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	d40c->pending_tx = 0;
}
static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
	}
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}
static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (chan_is_logical(d40c)) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (chan_is_logical(d40c)) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (chan_is_logical(d40c))
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static int d40_terminate_all(struct d40_chan *chan)
{
	unsigned long flags;
	int ret = 0;

	ret = d40_pause(chan);
	if (!ret && chan_is_physical(chan))
		ret = d40_channel_execute_command(chan, D40_DMA_STOP);

	spin_lock_irqsave(&chan->lock, flags);
	d40_term_all(chan);
	spin_unlock_irqrestore(&chan->lock, flags);

	return ret;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (chan_is_logical(d40c)) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (chan_is_logical(d40c))
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload
		 * it, and only when the list is completed. We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	if (!d40d->cyclic)
		d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_pool_lli_free(d40c, d40d);
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	long chan = -1;
	int row;
	long idx;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {
		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
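/*
 * Editor's illustration (not in the original source): the rows of il[] are
 * scanned as one long bit vector, so a terminal-count bit idx found in the
 * D40_DREG_LCTIS1 row resolves to the logical channel at
 * lookup_log_chans[32 + idx], while rows whose offset is D40_PHY_CHAN fall
 * back to lookup_phy_chans[idx].
 */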
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}
*d40c
)
1577 struct d40_phy_res
*phy
= d40c
->phy_chan
;
1580 struct d40_desc
*_d
;
1583 /* Terminate all queued and active transfers */
1586 /* Release client owned descriptors */
1587 if (!list_empty(&d40c
->client
))
1588 list_for_each_entry_safe(d
, _d
, &d40c
->client
, node
) {
1589 d40_pool_lli_free(d40c
, d
);
1591 d40_desc_free(d40c
, d
);
1595 chan_err(d40c
, "phy == null\n");
1599 if (phy
->allocated_src
== D40_ALLOC_FREE
&&
1600 phy
->allocated_dst
== D40_ALLOC_FREE
) {
1601 chan_err(d40c
, "channel already free\n");
1605 if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
||
1606 d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1607 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.dst_dev_type
);
1609 } else if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) {
1610 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.src_dev_type
);
1613 chan_err(d40c
, "Unknown direction\n");
1617 res
= d40_channel_execute_command(d40c
, D40_DMA_SUSPEND_REQ
);
1619 chan_err(d40c
, "suspend failed\n");
1623 if (chan_is_logical(d40c
)) {
1624 /* Release logical channel, deactivate the event line */
1626 d40_config_set_event(d40c
, false);
1627 d40c
->base
->lookup_log_chans
[d40c
->log_num
] = NULL
;
1630 * Check if there are more logical allocation
1631 * on this phy channel.
1633 if (!d40_alloc_mask_free(phy
, is_src
, event
)) {
1634 /* Resume the other logical channels if any */
1635 if (d40_chan_has_events(d40c
)) {
1636 res
= d40_channel_execute_command(d40c
,
1640 "Executing RUN command\n");
1647 (void) d40_alloc_mask_free(phy
, is_src
, 0);
1650 /* Release physical channel */
1651 res
= d40_channel_execute_command(d40c
, D40_DMA_STOP
);
1653 chan_err(d40c
, "Failed to stop channel\n");
1656 d40c
->phy_chan
= NULL
;
1657 d40c
->configured
= false;
1658 d40c
->base
->lookup_phy_chans
[phy
->num
] = NULL
;
static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}
static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}
static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}
static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_FROM_DEVICE)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}
*
1847 d40_prep_sg(struct dma_chan
*dchan
, struct scatterlist
*sg_src
,
1848 struct scatterlist
*sg_dst
, unsigned int sg_len
,
1849 enum dma_data_direction direction
, unsigned long dma_flags
)
1851 struct d40_chan
*chan
= container_of(dchan
, struct d40_chan
, chan
);
1852 dma_addr_t src_dev_addr
= 0;
1853 dma_addr_t dst_dev_addr
= 0;
1854 struct d40_desc
*desc
;
1855 unsigned long flags
;
1858 if (!chan
->phy_chan
) {
1859 chan_err(chan
, "Cannot prepare unallocated channel\n");
1864 spin_lock_irqsave(&chan
->lock
, flags
);
1866 desc
= d40_prep_desc(chan
, sg_src
, sg_len
, dma_flags
);
1870 if (sg_next(&sg_src
[sg_len
- 1]) == sg_src
)
1871 desc
->cyclic
= true;
1873 if (direction
!= DMA_NONE
) {
1874 dma_addr_t dev_addr
= d40_get_dev_addr(chan
, direction
);
1876 if (direction
== DMA_FROM_DEVICE
)
1877 src_dev_addr
= dev_addr
;
1878 else if (direction
== DMA_TO_DEVICE
)
1879 dst_dev_addr
= dev_addr
;
1882 if (chan_is_logical(chan
))
1883 ret
= d40_prep_sg_log(chan
, desc
, sg_src
, sg_dst
,
1884 sg_len
, src_dev_addr
, dst_dev_addr
);
1886 ret
= d40_prep_sg_phy(chan
, desc
, sg_src
, sg_dst
,
1887 sg_len
, src_dev_addr
, dst_dev_addr
);
1890 chan_err(chan
, "Failed to prepare %s sg job: %d\n",
1891 chan_is_logical(chan
) ? "log" : "phy", ret
);
1895 spin_unlock_irqrestore(&chan
->lock
, flags
);
1901 d40_desc_free(chan
, desc
);
1902 spin_unlock_irqrestore(&chan
->lock
, flags
);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
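/*
 * Editor's sketch (hypothetical client code, not part of this driver):
 * stedma40_filter() is intended to be handed to dma_request_channel()
 * together with a stedma40_chan_cfg describing the wanted channel; passing
 * a NULL config falls back to the memcpy defaults, as the code above shows.
 */
static struct dma_chan * __maybe_unused
d40_request_channel_example(struct stedma40_chan_cfg *cfg)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* cfg may be NULL for a plain memcpy channel */
	return dma_request_channel(mask, stedma40_filter, cfg);
}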
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
		      size_t buf_len, size_t period_len,
		      enum dma_data_direction direction)
{
	unsigned int periods = buf_len / period_len;
	struct dma_async_tx_descriptor *txd;
	struct scatterlist *sg;
	int i;

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
	for (i = 0; i < periods; i++) {
		sg_dma_address(&sg[i]) = dma_addr;
		sg_dma_len(&sg[i]) = period_len;
		dma_addr += period_len;
	}

	sg[periods].offset = 0;
	sg[periods].length = 0;
	sg[periods].page_link =
		((unsigned long)sg | 0x01) & ~0x02;

	txd = d40_prep_sg(chan, sg, sg, periods, direction,
			  DMA_PREP_INTERRUPT);

	kfree(sg);

	return txd;
}
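/*
 * Editor's note (not in the original source): the extra terminating entry
 * above is hand-built into a chain link that points back to sg[0]: setting
 * bit 0 of page_link marks it as a chain entry and clearing bit 1 clears
 * the end-of-list flag, so sg_next() of the last period wraps around. That
 * wrap is what d40_prep_sg() detects with its
 * sg_next(&sg_src[sg_len - 1]) == sg_src test to flag the job as cyclic.
 */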
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (chan_is_logical(d40c)) {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else if (config_maxburst >= 2)
			psize = STEDMA40_PSIZE_PHY_2;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.big_endian = false;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.big_endian = false;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
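/*
 * Editor's sketch (hypothetical client code, not part of this driver): how
 * the runtime configuration above is normally reached through the generic
 * dmaengine API. The FIFO address and burst size are made-up values.
 */
static int __maybe_unused d40_slave_config_example(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= 0x80120000,	/* made-up peripheral FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,	/* selects an STEDMA40_PSIZE_*_8 above */
	};

	/* Ends up in d40_set_runtime_config() via DMA_SLAVE_CONFIG */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long) &cfg);
}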
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return d40_terminate_all(d40c);
	case DMA_PAUSE:
		return d40_pause(d40c);
	case DMA_RESUME:
		return d40_resume(d40c);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
2324 static void __init
d40_chan_init(struct d40_base
*base
, struct dma_device
*dma
,
2325 struct d40_chan
*chans
, int offset
,
2329 struct d40_chan
*d40c
;
2331 INIT_LIST_HEAD(&dma
->channels
);
2333 for (i
= offset
; i
< offset
+ num_chans
; i
++) {
2336 d40c
->chan
.device
= dma
;
2338 spin_lock_init(&d40c
->lock
);
2340 d40c
->log_num
= D40_PHY_CHAN
;
2342 INIT_LIST_HEAD(&d40c
->active
);
2343 INIT_LIST_HEAD(&d40c
->queue
);
2344 INIT_LIST_HEAD(&d40c
->client
);
2346 tasklet_init(&d40c
->tasklet
, dma_tasklet
,
2347 (unsigned long) d40c
);
2349 list_add_tail(&d40c
->chan
.device_node
,
static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
		dev->device_prep_slave_sg = d40_prep_slave_sg;

	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
		dev->device_prep_dma_memcpy = d40_prep_memcpy;

		/*
		 * This controller can only access addresses at even
		 * 32bit boundaries, i.e. 2^2
		 */
		dev->copy_align = 2;
	}

	if (dma_has_cap(DMA_SG, dev->cap_mask))
		dev->device_prep_dma_sg = d40_prep_memcpy_sg;

	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
	dev->device_free_chan_resources = d40_free_chan_resources;
	dev->device_issue_pending = d40_issue_pending;
	dev->device_tx_status = d40_tx_status;
	dev->device_control = d40_control;
	dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

	d40_ops_init(base, &base->dma_slave);

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	d40_ops_init(base, &base->dma_memcpy);

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
	/* The CYCLIC capability belongs on dma_both here, not dma_slave */
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);

	d40_ops_init(base, &base->dma_both);
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
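
/*
 * Usage sketch (illustrative, not part of this driver): once the three
 * dma_device instances above are registered, a client can request a slave
 * channel through the usual capability-mask interface, e.g.:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &dma_cfg);
 *
 * where dma_cfg is a struct stedma40_chan_cfg describing the desired
 * event line and channel type.
 */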

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
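
/*
 * Worked example (an assumption, not from the original source): PRSME holds
 * the 2-bit security fields for even-numbered channels and PRSMO those for
 * odd ones, two bits per channel. For channel 5 the loop above tests
 * (val[1] >> 4) & 3, which matches D40_CHAN_POS() and D40_CHAN_POS_MASK();
 * a field value of 1 marks a secure-only channel.
 */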

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * DB8500ed has 0x0008,
		 * ? has 0x0018,
		 * DB8500v1 has 0x0028
		 * DB8500v2 has 0x0038
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},

		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			d40_err(&pdev->dev,
				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels equals the number of
		 * event lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
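
/*
 * Worked example (an assumption, not in the original source): the channel
 * count is derived as 4 * (ICFG & 0x7) + 4, so an ICFG field of 1 yields
 * 4 * 1 + 4 = 8 physical channels and the maximum field of 7 yields 32.
 */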

static void __init d40_hw_init(struct d40_base *base)
{

	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
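
/*
 * Illustrative note (an assumption, not from the original source): the
 * PRMSE/PRMSO and ACTIVE/ACTIVO registers carry two bits per channel,
 * split between even- and odd-numbered channels. The loop above walks the
 * channels from the highest number down, shifting each accumulator left by
 * two before inserting the next field, so channel 0 ends up in the lowest
 * two bits of the even-channel registers.
 */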

static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);

		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
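
/*
 * Worked example (an assumption, not from the original source): with 8
 * physical channels the pool mapped for the hardware is 8 * SZ_1K = 8 KiB,
 * while LCLA_ALIGNMENT is 0x40000 (256 KiB). Over-allocating an aligned
 * buffer up front would waste most of those 256 KiB, hence the retry loop
 * above; only when no naturally aligned allocation turns up does the code
 * fall back to allocating 8 KiB + 256 KiB and aligning with PTR_ALIGN().
 */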

static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);