// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* ADM registers - calculated from channel number and security domain */
#define ADM_CHAN_MULTI			0x4
#define ADM_CI_MULTI			0x4
#define ADM_CRCI_MULTI			0x4
#define ADM_EE_MULTI			0x800
#define ADM_CHAN_OFFS(chan)		(ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee)			(ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci)			(ADM_CI_MULTI * (ci))
#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee)		(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan)		(0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci)			(0x390 + (ci) * ADM_CI_MULTI)
#define ADM_GP_CTL			0x3d8
#define ADM_CRCI_CTL(crci, ee)		(0x400 + (crci) * ADM_CRCI_MULTI + \
					 ADM_EE_OFFS(ee))

/* channel status */
#define ADM_CH_STATUS_VALID	BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID	BIT(31)
#define ADM_CH_RSLT_ERR		BIT(3)
#define ADM_CH_RSLT_FLUSH	BIT(2)
#define ADM_CH_RSLT_TPD		BIT(1)

/* channel conf */
#define ADM_CH_CONF_SHADOW_EN		BIT(12)
#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result conf */
#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)

/* CRCI CTL */
#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
#define ADM_CRCI_CTL_RST	BIT(17)

/* CI configuration */
#define ADM_CI_RANGE_END(x)	((x) << 24)
#define ADM_CI_RANGE_START(x)	((x) << 16)
#define ADM_CI_BURST_4_WORDS	BIT(2)
#define ADM_CI_BURST_8_WORDS	BIT(3)

/* GP CTL */
#define ADM_GP_CTL_LP_EN	BIT(12)
#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)

/* Command pointer list entry */
#define ADM_CPLE_LP		BIT(31)
#define ADM_CPLE_CMD_PTR_LIST	BIT(29)

/* Command list entry */
#define ADM_CMD_LC		BIT(31)
#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE	0x0
#define ADM_CMD_TYPE_BOX	0x3

#define ADM_CRCI_MUX_SEL	BIT(4)
#define ADM_DESC_ALIGN		8
#define ADM_MAX_XFER		(SZ_64K - 1)
#define ADM_MAX_ROWS		(SZ_64K - 1)
#define ADM_MAX_CHANNELS	16

struct adm_desc_hw_box {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 row_len;
	u32 num_rows;
	u32 row_offset;
};

struct adm_desc_hw_single {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 len;
};

struct adm_async_desc {
	struct virt_dma_desc vd;
	struct adm_device *adev;

	size_t length;
	enum dma_transfer_direction dir;
	dma_addr_t dma_addr;
	size_t dma_len;

	void *cpl;
	u32 crci;
	u32 mux;
	u32 blk_size;
};

struct adm_chan {
	struct virt_dma_chan vc;
	struct adm_device *adev;

	/* parsed from DT */
	u32 id;			/* channel id */
	u32 crci;		/* CRCI to be used for transfers */
	u32 mux;		/* mux controller to be used */

	struct adm_async_desc *curr_txd;
	struct dma_slave_config slave;
	struct list_head node;

	int error;
	int initialized;
};

static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
	return container_of(common, struct adm_chan, vc.chan);
}

struct adm_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct adm_chan *channels;

	u32 ee;

	struct clk *core_clk;
	struct clk *iface_clk;

	struct reset_control *clk_reset;
	struct reset_control *c0_reset;
	struct reset_control *c1_reset;
	struct reset_control *c2_reset;
	int irq;
};

/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 * @chan: dma channel
 *
 * Free all allocated descriptors associated with this channel
 */
static void adm_free_chan(struct dma_chan *chan)
{
	/* free all queued descriptors */
	vchan_free_chan_resources(to_virt_chan(chan));
}

/**
 * adm_get_blksize - Get block size from burst value
 * @burst: Burst size of transaction
 */
static int adm_get_blksize(unsigned int burst)
{
	switch (burst) {
	case 16:
	case 32:
	case 64:
	case 128:
		return ffs(burst >> 4) - 1;
	case 192:
		return 4;
	case 256:
		return 5;
	default:
		return -EINVAL;
	}
}

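/*
 * Worked example for the mapping above (derived from the switch cases,
 * shown for reference only):
 *
 *	burst:    16  32  64  128  192  256
 *	blksize:   0   1   2    3    4    5
 *
 * For the power-of-two bursts, ffs(burst >> 4) - 1 yields
 * ffs(1) - 1 = 0, ffs(2) - 1 = 1, ffs(4) - 1 = 2 and ffs(8) - 1 = 3.
 */
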
/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @crci: CRCI value
 * @burst: Burst size of transaction
 * @direction: DMA transfer direction
 */
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
					struct scatterlist *sg, u32 crci,
					u32 burst,
					enum dma_transfer_direction direction)
{
	struct adm_desc_hw_box *box_desc = NULL;
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 rows, row_offset, crci_cmd;
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		crci_cmd = ADM_CMD_SRC_CRCI(crci);
		row_offset = burst;
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		crci_cmd = ADM_CMD_DST_CRCI(crci);
		row_offset = burst << 16;
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	while (remainder >= burst) {
		box_desc = desc;
		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
		box_desc->row_offset = row_offset;
		box_desc->src_addr = *src;
		box_desc->dst_addr = *dst;

		rows = remainder / burst;
		rows = min_t(u32, rows, ADM_MAX_ROWS);
		box_desc->num_rows = rows << 16 | rows;
		box_desc->row_len = burst << 16 | burst;

		*incr_addr += burst * rows;
		remainder -= burst * rows;
		desc += sizeof(*box_desc);
	}

	/* if leftover bytes, do one single descriptor */
	if (remainder) {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
		single_desc->len = remainder;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		desc += sizeof(*single_desc);

		/* set last command if this is the end of the transaction */
		if (sg_is_last(sg))
			single_desc->cmd |= ADM_CMD_LC;
	} else {
		if (box_desc && sg_is_last(sg))
			box_desc->cmd |= ADM_CMD_LC;
	}

	return desc;
}

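/*
 * Worked example (illustrative numbers only): a 1000 byte sg entry with a
 * 16 byte burst yields one box descriptor of 62 rows x 16 bytes (992
 * bytes) followed by one single descriptor for the trailing 8 bytes; the
 * last descriptor also carries ADM_CMD_LC when this is the final sg entry
 * of the transaction.
 */
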
/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @direction: DMA transfer direction
 */
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
					    struct scatterlist *sg,
					    enum dma_transfer_direction direction)
{
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	do {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		single_desc->len = (remainder > ADM_MAX_XFER) ?
				ADM_MAX_XFER : remainder;

		remainder -= single_desc->len;
		*incr_addr += single_desc->len;
		desc += sizeof(*single_desc);
	} while (remainder);

	/* set last command if this is the end of the whole transaction */
	if (sg_is_last(sg))
		single_desc->cmd |= ADM_CMD_LC;

	return desc;
}

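/*
 * Worked example (illustrative numbers only): without flow control, a
 * 150000 byte sg entry is chopped at the ADM_MAX_XFER (65535 byte) limit
 * into three single descriptors of 65535, 65535 and 18930 bytes; only the
 * last descriptor of the final sg entry gets ADM_CMD_LC.
 */
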
/**
 * adm_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long flags,
							 void *context)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;
	struct scatterlist *sg;
	dma_addr_t cple_addr;
	u32 i, burst;
	u32 single_count = 0, box_count = 0, crci = 0;
	void *desc;
	u32 *cple;
	int blk_size = 0;

	if (!is_slave_direction(direction)) {
		dev_err(adev->dev, "invalid dma direction\n");
		return NULL;
	}

	/*
	 * get burst value from slave configuration
	 */
	burst = (direction == DMA_MEM_TO_DEV) ?
		achan->slave.dst_maxburst :
		achan->slave.src_maxburst;

	/* if using flow control, validate burst and crci values */
	if (achan->slave.device_fc) {
		blk_size = adm_get_blksize(burst);
		if (blk_size < 0) {
			dev_err(adev->dev, "invalid burst value: %d\n",
				burst);
			return NULL;
		}

		crci = achan->crci & 0xf;
		if (!crci || achan->crci > 0x1f) {
			dev_err(adev->dev, "invalid crci value\n");
			return NULL;
		}
	}

	/* iterate through sgs and compute allocation size of structures */
	for_each_sg(sgl, sg, sg_len, i) {
		if (achan->slave.device_fc) {
			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
						  ADM_MAX_ROWS);
			if (sg_dma_len(sg) % burst)
				single_count++;
		} else {
			single_count += DIV_ROUND_UP(sg_dma_len(sg),
						     ADM_MAX_XFER);
		}
	}

	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
	if (!async_desc) {
		dev_err(adev->dev, "not enough memory for async_desc struct\n");
		return NULL;
	}

	async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
			      box_count * sizeof(struct adm_desc_hw_box) +
			      sizeof(*cple) + 2 * ADM_DESC_ALIGN;

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl) {
		dev_err(adev->dev, "not enough memory for cpl struct\n");
		goto free;
	}

	async_desc->adev = adev;

	/* both command list entry and descriptors must be 8 byte aligned */
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->length += sg_dma_len(sg);

		if (achan->slave.device_fc)
			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
							  burst, direction);
		else
			desc = adm_process_non_fc_descriptors(achan, desc, sg,
							      direction);
	}

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len,
					      DMA_TO_DEVICE);

	if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
		dev_err(adev->dev, "dma mapping error for cpl\n");
		goto free;
	}

	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

	/* init cmd list */
	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
				DMA_TO_DEVICE);
	*cple = ADM_CPLE_LP;
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
				   DMA_TO_DEVICE);

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
	kfree(async_desc);
	return NULL;
}

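/*
 * Client-side sketch (hypothetical peripheral driver, not part of this
 * file): a flow-controlled transfer would reach the callback above
 * through the generic dmaengine API, roughly as follows:
 *
 *	struct qcom_adm_peripheral_config adm_cfg = { .crci = 1 };
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys_addr,	// device FIFO, example name
 *		.src_maxburst = 16,
 *		.device_fc = true,
 *		.peripheral_config = &adm_cfg,
 *		.peripheral_size = sizeof(adm_cfg),
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
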
/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions, aborts current transaction.
 * No callbacks are done.
 */
static int adm_terminate_all(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&achan->vc.lock, flags);
	vchan_get_all_descriptors(&achan->vc, &head);

	/* send flush command to terminate current transaction */
	writel_relaxed(0x0,
		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	vchan_dma_desc_free_list(&achan->vc, &head);

	return 0;
}

static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
	unsigned long flag;

	spin_lock_irqsave(&achan->vc.lock, flag);
	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
	if (cfg->peripheral_size == sizeof(*config))
		achan->crci = config->crci;
	spin_unlock_irqrestore(&achan->vc.lock, flag);

	return 0;
}

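/*
 * Note on the CRCI handoff above (summary, not from the original
 * comments): clients can supply the CRCI either through the optional
 * second cell of the dmas property (see adm_dma_xlate() below) or
 * through the qcom_adm_peripheral_config shown in the sketch after
 * adm_prep_slave_sg(); when peripheral_size matches, the value passed
 * here overrides the one parsed from the device tree.
 */
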
/**
 * adm_start_dma - start next transaction
 * @achan: ADM dma channel
 */
static void adm_start_dma(struct adm_chan *achan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;

	lockdep_assert_held(&achan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	/* write next command list out to the CMD FIFO */
	async_desc = container_of(vd, struct adm_async_desc, vd);
	achan->curr_txd = async_desc;

	/* reset channel error */
	achan->error = 0;

	if (!achan->initialized) {
		/* enable interrupts */
		writel(ADM_CH_CONF_SHADOW_EN |
		       ADM_CH_CONF_PERM_MPU_CONF |
		       ADM_CH_CONF_MPU_DISABLE |
		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
		       adev->regs + ADM_CH_CONF(achan->id));

		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		achan->initialized = 1;
	}

	/* set the crci block size if this transaction requires CRCI */
	if (async_desc->crci) {
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
	}

	/* make sure IRQ enable doesn't get reordered */
	wmb();

	/* write next command list out to the CMD FIFO */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}

/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the ADM controller
 */
static irqreturn_t adm_dma_irq(int irq, void *data)
{
	struct adm_device *adev = data;
	u32 srcs, i;
	struct adm_async_desc *async_desc;
	unsigned long flags;

	srcs = readl_relaxed(adev->regs +
			ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		struct adm_chan *achan = &adev->channels[i];
		u32 status, result;

		if (srcs & BIT(i)) {
			status = readl_relaxed(adev->regs +
					       ADM_CH_STATUS_SD(i, adev->ee));

			/* if no result present, skip */
			if (!(status & ADM_CH_STATUS_VALID))
				continue;

			result = readl_relaxed(adev->regs +
					       ADM_CH_RSLT(i, adev->ee));

			/* no valid results, skip */
			if (!(result & ADM_CH_RSLT_VALID))
				continue;

			/* flag error if transaction was flushed or failed */
			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
				achan->error = 1;

			spin_lock_irqsave(&achan->vc.lock, flags);
			async_desc = achan->curr_txd;

			achan->curr_txd = NULL;

			if (async_desc) {
				vchan_cookie_complete(&async_desc->vd);

				/* kick off next DMA */
				adm_start_dma(achan);
			}

			spin_unlock_irqrestore(&achan->vc.lock, flags);
		}
	}

	return IRQ_HANDLED;
}

/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct adm_async_desc, vd)->length;

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
	 * anything in between.
	 */
	dma_set_residue(txstate, residue);

	if (achan->error)
		return DMA_ERROR;

	return ret;
}

/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA
 */
static void adm_issue_pending(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&achan->vc.lock, flags);

	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
		adm_start_dma(achan);
	spin_unlock_irqrestore(&achan->vc.lock, flags);
}

/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);
	kfree(async_desc);
}

static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
			     u32 index)
{
	achan->id = index;
	achan->adev = adev;

	vchan_init(&achan->vc, &adev->common);
	achan->vc.desc_free = adm_dma_free_desc;
}

/**
 * adm_dma_xlate - translate a DT DMA specifier into a dma channel
 * @dma_spec: pointer to DMA specifier as found in the device tree
 * @ofdma: pointer to DMA controller data
 *
 * This can use either 1-cell or 2-cell formats, the first cell
 * identifies the slave device, while the optional second cell
 * contains the crci value.
 *
 * Returns pointer to appropriate dma channel on success or NULL on error.
 */
static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct dma_device *dev = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate = NULL;
	struct adm_chan *achan;

	if (!dev || dma_spec->args_count > 2)
		return NULL;

	list_for_each_entry(chan, &dev->channels, device_node)
		if (chan->chan_id == dma_spec->args[0]) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	achan = to_adm_chan(candidate);
	if (dma_spec->args_count == 2)
		achan->crci = dma_spec->args[1];
	else
		achan->crci = 0;

	return dma_get_slave_channel(candidate);
}

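/*
 * Device tree sketch (illustrative values only) for the two specifier
 * formats accepted above:
 *
 *	dmas = <&adm_dma 8>;		// channel 8, CRCI via slave config
 *	dmas = <&adm_dma 8 1>;		// channel 8, CRCI 1
 */
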
static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);

	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
			ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/* initialize dmaengine apis */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);

	return ret;
}

static void adm_dma_remove(struct platform_device *pdev)
{
	struct adm_device *adev = platform_get_drvdata(pdev);
	struct adm_chan *achan;
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&adev->common);

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		achan = &adev->channels[i];

		/* mask IRQs for this channel/EE pair */
		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		tasklet_kill(&adev->channels[i].vc.task);
		adm_terminate_all(&adev->channels[i].vc.chan);
	}

	devm_free_irq(adev->dev, adev->irq, adev);

	clk_disable_unprepare(adev->core_clk);
	clk_disable_unprepare(adev->iface_clk);
}

static const struct of_device_id adm_of_match[] = {
	{ .compatible = "qcom,adm", },
	{}
};
MODULE_DEVICE_TABLE(of, adm_of_match);

static struct platform_driver adm_dma_driver = {
	.probe = adm_dma_probe,
	.remove = adm_dma_remove,
	.driver = {
		.name = "adm-dma-engine",
		.of_match_table = adm_of_match,
	},
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");