// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
/* ADM registers - calculated from channel number and security domain */
#define ADM_CHAN_MULTI			0x4
#define ADM_CI_MULTI			0x4
#define ADM_CRCI_MULTI			0x4
#define ADM_EE_MULTI			0x800
#define ADM_CHAN_OFFS(chan)		(ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee)			(ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci)			(ADM_CI_MULTI * (ci))
#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee)		(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan)		(0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci)			(0x390 + ADM_CI_OFFS(ci))
#define ADM_GP_CTL			0x3d8
#define ADM_CRCI_CTL(crci, ee)		(0x400 + (crci) * ADM_CRCI_MULTI + \
					ADM_EE_OFFS(ee))
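
/*
 * Example: registers are banked by channel and by security domain (EE), so
 * for channel 2 in EE 1 the result register sits at
 * ADM_CH_RSLT(2, 1) = 0x40 + (0x4 * 2) + (0x800 * 1) = 0x848.
 */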
/* channel status */
#define ADM_CH_STATUS_VALID	BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID	BIT(31)
#define ADM_CH_RSLT_ERR		BIT(3)
#define ADM_CH_RSLT_FLUSH	BIT(2)
#define ADM_CH_RSLT_TPD		BIT(1)

/* channel configuration */
#define ADM_CH_CONF_SHADOW_EN		BIT(12)
#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result conf */
#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)

/* CRCI control */
#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
#define ADM_CRCI_CTL_RST	BIT(17)

/* CI configuration */
#define ADM_CI_RANGE_END(x)	((x) << 24)
#define ADM_CI_RANGE_START(x)	((x) << 16)
#define ADM_CI_BURST_4_WORDS	BIT(2)
#define ADM_CI_BURST_8_WORDS	BIT(3)

/* GP control */
#define ADM_GP_CTL_LP_EN	BIT(12)
#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)

/* Command pointer list entry */
#define ADM_CPLE_LP		BIT(31)
#define ADM_CPLE_CMD_PTR_LIST	BIT(29)

/* Command list entry */
#define ADM_CMD_LC		BIT(31)
#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE	0x0
#define ADM_CMD_TYPE_BOX	0x3

#define ADM_CRCI_MUX_SEL	BIT(4)
#define ADM_DESC_ALIGN		8
#define ADM_MAX_XFER		(SZ_64K - 1)
#define ADM_MAX_ROWS		(SZ_64K - 1)
#define ADM_MAX_CHANNELS	16
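
/*
 * Example command pointer list entry: a single command list at bus address
 * 0x1000 that terminates the list is encoded as ADM_CPLE_LP | (0x1000 >> 3),
 * since the hardware consumes 8-byte-aligned addresses shifted right by
 * three bits.
 */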
struct adm_desc_hw_box {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 row_len;
	u32 num_rows;
	u32 row_offset;
};

struct adm_desc_hw_single {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 len;
};
struct adm_async_desc {
	struct virt_dma_desc vd;
	struct adm_device *adev;

	size_t length;
	enum dma_transfer_direction dir;
	dma_addr_t dma_addr;
	size_t dma_len;

	void *cpl;
	u32 crci;
	u32 mux;
	u32 blk_size;
};

struct adm_chan {
	struct virt_dma_chan vc;
	struct adm_device *adev;

	u32 id;			/* channel id */

	struct adm_async_desc *curr_txd;
	struct dma_slave_config slave;
	struct list_head node;

	int error;
	int initialized;
};
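
/*
 * The cpl buffer allocated in adm_prep_slave_sg() holds, in order: padding
 * up to an 8-byte boundary, one command pointer list entry, more padding,
 * then the hardware command list (box and/or single descriptors). Both the
 * CPLE and the first descriptor are aligned via PTR_ALIGN(), which is why
 * dma_len reserves 2 * ADM_DESC_ALIGN bytes of slack.
 */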
static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
	return container_of(common, struct adm_chan, vc.chan);
}
struct adm_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct adm_chan *channels;

	u32 ee;

	struct clk *core_clk;
	struct clk *iface_clk;

	struct reset_control *clk_reset;
	struct reset_control *c0_reset;
	struct reset_control *c1_reset;
	struct reset_control *c2_reset;
	int irq;
};
/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 * @chan: dma channel
 *
 * Free all allocated descriptors associated with this channel
 */
static void adm_free_chan(struct dma_chan *chan)
{
	/* free all queued descriptors */
	vchan_free_chan_resources(to_virt_chan(chan));
}
/**
 * adm_get_blksize - Get block size from burst value
 * @burst: Burst size of transaction
 *
 * Returns the CRCI block size encoding for the given burst, or -EINVAL if
 * the burst is not supported.
 */
static int adm_get_blksize(unsigned int burst)
{
	int ret;

	switch (burst) {
	case 16:
	case 32:
	case 64:
	case 128:
		/* power-of-two bursts of 16..128 bytes map to encodings 0..3 */
		ret = ffs(burst >> 4) - 1;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
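
/*
 * Example: burst = 64 gives ffs(64 >> 4) - 1 = ffs(4) - 1 = 2; this encoding
 * is later programmed into ADM_CRCI_CTL by adm_start_dma().
 */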
/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @crci: CRCI value
 * @burst: Burst size of transaction
 * @direction: DMA transfer direction
 */
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
					struct scatterlist *sg, u32 crci,
					u32 burst,
					enum dma_transfer_direction direction)
{
	struct adm_desc_hw_box *box_desc = NULL;
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 rows, row_offset, crci_cmd;
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		crci_cmd = ADM_CMD_SRC_CRCI(crci);
		row_offset = burst;
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		crci_cmd = ADM_CMD_DST_CRCI(crci);
		row_offset = burst << 16;
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	while (remainder >= burst) {
		box_desc = desc;
		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
		box_desc->row_offset = row_offset;
		box_desc->src_addr = *src;
		box_desc->dst_addr = *dst;

		rows = remainder / burst;
		rows = min_t(u32, rows, ADM_MAX_ROWS);
		box_desc->num_rows = rows << 16 | rows;
		box_desc->row_len = burst << 16 | burst;

		*incr_addr += burst * rows;
		remainder -= burst * rows;
		desc += sizeof(*box_desc);
	}

	/* if leftover bytes, do one single descriptor */
	if (remainder) {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
		single_desc->len = remainder;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		desc += sizeof(*single_desc);

		if (sg_is_last(sg))
			single_desc->cmd |= ADM_CMD_LC;
	} else {
		if (box_desc && sg_is_last(sg))
			box_desc->cmd |= ADM_CMD_LC;
	}

	return desc;
}
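
/*
 * Example: a 100-byte sg entry with a 32-byte burst becomes one box
 * descriptor covering 3 rows of 32 bytes (96 bytes total), followed by one
 * single descriptor for the remaining 4 bytes.
 */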
/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @direction: DMA transfer direction
 */
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
					    struct scatterlist *sg,
					    enum dma_transfer_direction direction)
{
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	do {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		single_desc->len = (remainder > ADM_MAX_XFER) ?
				ADM_MAX_XFER : remainder;

		remainder -= single_desc->len;
		*incr_addr += single_desc->len;
		desc += sizeof(*single_desc);
	} while (remainder);

	/* set last command if this is the end of the whole transaction */
	single_desc->cmd |= ADM_CMD_LC;

	return desc;
}
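
/*
 * Example: a 150000-byte sg entry is split into three single descriptors of
 * 65535, 65535 and 18930 bytes, since each descriptor is capped at
 * ADM_MAX_XFER (SZ_64K - 1).
 */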
/**
 * adm_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long flags,
							 void *context)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;
	struct scatterlist *sg;
	dma_addr_t cple_addr;
	u32 i, burst;
	u32 single_count = 0, box_count = 0, crci = 0;
	u32 *cple;
	void *desc;
	int blk_size = 0;

	if (!is_slave_direction(direction)) {
		dev_err(adev->dev, "invalid dma direction\n");
		return NULL;
	}

	/*
	 * get burst value from slave configuration
	 */
	burst = (direction == DMA_MEM_TO_DEV) ?
		achan->slave.dst_maxburst :
		achan->slave.src_maxburst;

	/* if using flow control, validate burst and crci values */
	if (achan->slave.device_fc) {
		blk_size = adm_get_blksize(burst);
		if (blk_size < 0) {
			dev_err(adev->dev, "invalid burst value: %d\n",
				burst);
			return ERR_PTR(-EINVAL);
		}

		crci = achan->slave.slave_id & 0xf;
		if (!crci || achan->slave.slave_id > 0x1f) {
			dev_err(adev->dev, "invalid crci value\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* iterate through sgs and compute allocation size of structures */
	for_each_sg(sgl, sg, sg_len, i) {
		if (achan->slave.device_fc) {
			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
						  ADM_MAX_ROWS);
			if (sg_dma_len(sg) % burst)
				single_count++;
		} else {
			single_count += DIV_ROUND_UP(sg_dma_len(sg),
						     ADM_MAX_XFER);
		}
	}

	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
	if (!async_desc)
		return ERR_PTR(-ENOMEM);

	async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
				ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
				box_count * sizeof(struct adm_desc_hw_box) +
				sizeof(*cple) + 2 * ADM_DESC_ALIGN;

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl)
		goto free;

	async_desc->adev = adev;

	/* both command list entry and descriptors must be 8 byte aligned */
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->length += sg_dma_len(sg);

		if (achan->slave.device_fc)
			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
							  burst, direction);
		else
			desc = adm_process_non_fc_descriptors(achan, desc, sg,
							      direction);
	}

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len,
					      DMA_TO_DEVICE);

	if (dma_mapping_error(adev->dev, async_desc->dma_addr))
		goto free;

	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

	/* init cmd list */
	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
				DMA_TO_DEVICE);
	*cple = ADM_CPLE_LP;
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
				   DMA_TO_DEVICE);

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
	kfree(async_desc);
	return ERR_PTR(-ENOMEM);
}
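
/*
 * A minimal consumer sketch (hypothetical client code, not part of this
 * driver): configure the channel for a flow-controlled write, then prep and
 * submit an sg list. fifo_addr and crci are assumed to come from the
 * client's resources.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_maxburst = 16,
 *		.device_fc = true,
 *		.slave_id = crci,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */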
/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions, aborts the current transaction.
 * No callbacks are done.
 */
static int adm_terminate_all(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&achan->vc.lock, flags);
	vchan_get_all_descriptors(&achan->vc, &head);

	/* send flush command to terminate current transaction */
	writel_relaxed(0x0,
		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	vchan_dma_desc_free_list(&achan->vc, &head);

	return 0;
}
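
/*
 * Note: the flush write above completes asynchronously; the hardware raises
 * an interrupt with ADM_CH_RSLT_FLUSH set in the result register, which
 * adm_dma_irq() treats as an error condition for the channel.
 */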
static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&achan->vc.lock, flag);
	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
	spin_unlock_irqrestore(&achan->vc.lock, flag);

	return 0;
}
/**
 * adm_start_dma - start next transaction
 * @achan: ADM dma channel
 */
static void adm_start_dma(struct adm_chan *achan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;

	lockdep_assert_held(&achan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	/* write next command list out to the CMD FIFO */
	async_desc = container_of(vd, struct adm_async_desc, vd);
	achan->curr_txd = async_desc;

	/* reset channel error */
	achan->error = 0;

	if (!achan->initialized) {
		/* enable interrupts */
		writel(ADM_CH_CONF_SHADOW_EN |
		       ADM_CH_CONF_PERM_MPU_CONF |
		       ADM_CH_CONF_MPU_DISABLE |
		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
		       adev->regs + ADM_CH_CONF(achan->id));

		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		achan->initialized = 1;
	}

	/* set the crci block size if this transaction requires CRCI */
	if (async_desc->crci) {
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
	}

	/* make sure IRQ enable doesn't get reordered */
	wmb();

	/* write next command list out to the CMD FIFO */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}
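
/*
 * Example: with ADM_DESC_ALIGN = 8, a command list mapped at bus address
 * 0x1008 is written to the CMD FIFO as 0x1008 >> 3 = 0x201; the hardware
 * only addresses 8-byte-aligned command lists.
 */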
/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the ADM controller
 */
static irqreturn_t adm_dma_irq(int irq, void *data)
{
	struct adm_device *adev = data;
	u32 srcs, i;
	struct adm_async_desc *async_desc;
	unsigned long flags;

	srcs = readl_relaxed(adev->regs +
			ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		struct adm_chan *achan = &adev->channels[i];
		u32 status, result;

		if (srcs & BIT(i)) {
			status = readl_relaxed(adev->regs +
					       ADM_CH_STATUS_SD(i, adev->ee));

			/* if no result present, skip */
			if (!(status & ADM_CH_STATUS_VALID))
				continue;

			result = readl_relaxed(adev->regs +
				ADM_CH_RSLT(i, adev->ee));

			/* no valid results, skip */
			if (!(result & ADM_CH_RSLT_VALID))
				continue;

			/* flag error if transaction was flushed or failed */
			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
				achan->error = 1;

			spin_lock_irqsave(&achan->vc.lock, flags);
			async_desc = achan->curr_txd;

			achan->curr_txd = NULL;

			if (async_desc) {
				vchan_cookie_complete(&async_desc->vd);

				/* kick off next DMA */
				adm_start_dma(achan);
			}

			spin_unlock_irqrestore(&achan->vc.lock, flags);
		}
	}

	return IRQ_HANDLED;
}
/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct adm_async_desc, vd)->length;

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
	 * anything in between.
	 */
	dma_set_residue(txstate, residue);

	if (achan->error)
		return DMA_ERROR;

	return ret;
}
/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA
 */
static void adm_issue_pending(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&achan->vc.lock, flags);

	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
		adm_start_dma(achan);
	spin_unlock_irqrestore(&achan->vc.lock, flags);
}
/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);
	kfree(async_desc);
}
static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
			     u32 index)
{
	achan->id = index;
	achan->adev = adev;

	vchan_init(&achan->vc, &adev->common);
	achan->vc.desc_free = adm_dma_free_desc;
}
static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);

	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
			ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/* initialize dmaengine apis */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);

	return ret;
}
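
/*
 * A minimal sketch of the device tree node this probe expects. Property
 * names ("qcom,ee", clock names "core"/"iface", reset names
 * "clk"/"c0"/"c1"/"c2") are taken from the lookups above; the unit address,
 * interrupt specifier, and phandle/constant names are illustrative
 * assumptions:
 *
 *	adm_dma: dma-controller@18300000 {
 *		compatible = "qcom,adm";
 *		reg = <0x18300000 0x100000>;
 *		interrupts = <0 171 0>;
 *		#dma-cells = <1>;
 *		clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
 *		clock-names = "core", "iface";
 *		resets = <&gcc ADM0_RESET>, <&gcc ADM0_C0_RESET>,
 *			 <&gcc ADM0_C1_RESET>, <&gcc ADM0_C2_RESET>;
 *		reset-names = "clk", "c0", "c1", "c2";
 *		qcom,ee = <0>;
 *	};
 */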
static int adm_dma_remove(struct platform_device *pdev)
{
	struct adm_device *adev = platform_get_drvdata(pdev);
	struct adm_chan *achan;
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&adev->common);

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		achan = &adev->channels[i];

		/* mask IRQs for this channel/EE pair */
		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		tasklet_kill(&adev->channels[i].vc.task);
		adm_terminate_all(&adev->channels[i].vc.chan);
	}

	devm_free_irq(adev->dev, adev->irq, adev);

	clk_disable_unprepare(adev->core_clk);
	clk_disable_unprepare(adev->iface_clk);

	return 0;
}
static const struct of_device_id adm_of_match[] = {
	{ .compatible = "qcom,adm", },
	{}
};
MODULE_DEVICE_TABLE(of, adm_of_match);
static struct platform_driver adm_dma_driver = {
	.probe = adm_dma_probe,
	.remove = adm_dma_remove,
	.driver = {
		.name = "adm-dma-engine",
		.of_match_table = adm_of_match,
	},
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");