// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* ADM registers - calculated from channel number and security domain */
#define ADM_CHAN_MULTI			0x4
#define ADM_CI_MULTI			0x4
#define ADM_CRCI_MULTI			0x4
#define ADM_EE_MULTI			0x800
#define ADM_CHAN_OFFS(chan)		(ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee)			(ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci)			(ADM_CHAN_OFFS(ci))
#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee)		(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan)		(0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci)			(0x390 + (ci) * ADM_CI_MULTI)
#define ADM_GP_CTL			0x3d8
#define ADM_CRCI_CTL(crci, ee)		(0x400 + (crci) * ADM_CRCI_MULTI + \
					 ADM_EE_OFFS(ee))

/* channel status */
#define ADM_CH_STATUS_VALID	BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID	BIT(31)
#define ADM_CH_RSLT_ERR		BIT(3)
#define ADM_CH_RSLT_FLUSH	BIT(2)
#define ADM_CH_RSLT_TPD		BIT(1)

/* channel conf */
#define ADM_CH_CONF_SHADOW_EN		BIT(12)
#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
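/*
 * The security domain (EE) field is split across the config register:
 * EE bits [1:0] land in bits [5:4] and EE bit 2 lands in bit 13.
 */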
#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result conf */
#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)

/* CRCI CTL */
#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
#define ADM_CRCI_CTL_RST	BIT(17)

/* CI configuration */
#define ADM_CI_RANGE_END(x)	((x) << 24)
#define ADM_CI_RANGE_START(x)	((x) << 16)
#define ADM_CI_BURST_4_WORDS	BIT(2)
#define ADM_CI_BURST_8_WORDS	BIT(3)

/* GP CTL */
#define ADM_GP_CTL_LP_EN	BIT(12)
#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)

/* Command pointer list entry */
#define ADM_CPLE_LP		BIT(31)
#define ADM_CPLE_CMD_PTR_LIST	BIT(29)

/* Command list entry */
#define ADM_CMD_LC		BIT(31)
#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE	0x0
#define ADM_CMD_TYPE_BOX	0x3

#define ADM_CRCI_MUX_SEL	BIT(4)
#define ADM_DESC_ALIGN		8
#define ADM_MAX_XFER		(SZ_64K - 1)
#define ADM_MAX_ROWS		(SZ_64K - 1)
#define ADM_MAX_CHANNELS	16

struct adm_desc_hw_box {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 row_len;
	u32 num_rows;
	u32 row_offset;
};

struct adm_desc_hw_single {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 len;
};

struct adm_async_desc {
	struct virt_dma_desc vd;
	struct adm_device *adev;

	size_t length;
	enum dma_transfer_direction dir;
	dma_addr_t dma_addr;
	size_t dma_len;

	void *cpl;
	dma_addr_t cp_addr;
	u32 crci;
	u32 mux;
	u32 blk_size;
};

struct adm_chan {
	struct virt_dma_chan vc;
	struct adm_device *adev;

	/* parsed from DT */
	u32 id;			/* channel id */

	struct adm_async_desc *curr_txd;
	struct dma_slave_config slave;
	struct list_head node;

	int error;
	int initialized;
};

static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
	return container_of(common, struct adm_chan, vc.chan);
}

struct adm_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct adm_chan *channels;

	u32 ee;

	struct clk *core_clk;
	struct clk *iface_clk;

	struct reset_control *clk_reset;
	struct reset_control *c0_reset;
	struct reset_control *c1_reset;
	struct reset_control *c2_reset;
	int irq;
};

/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 *
 * @chan: dma channel
 *
 * Free all allocated descriptors associated with this channel
 */
static void adm_free_chan(struct dma_chan *chan)
{
	/* free all queued descriptors */
	vchan_free_chan_resources(to_virt_chan(chan));
}

/**
 * adm_get_blksize - Get block size from burst value
 *
 * @burst: Burst size of transaction
 */
static int adm_get_blksize(unsigned int burst)
{
	int ret;
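
	/*
	 * The CRCI block size field encodes the burst length in bytes:
	 * 16/32/64/128 map to 0-3 via ffs(burst >> 4) - 1, 192 maps to 4,
	 * 256 maps to 5, and anything else is rejected.
	 */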
	switch (burst) {
	case 16:
	case 32:
	case 64:
	case 128:
		ret = ffs(burst >> 4) - 1;
		break;
	case 192:
		ret = 4;
		break;
	case 256:
		ret = 5;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @crci: CRCI value
 * @burst: Burst size of transaction
 * @direction: DMA transfer direction
 */
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
					struct scatterlist *sg, u32 crci,
					u32 burst,
					enum dma_transfer_direction direction)
{
	struct adm_desc_hw_box *box_desc = NULL;
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 rows, row_offset, crci_cmd;
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		crci_cmd = ADM_CMD_SRC_CRCI(crci);
		row_offset = burst;
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		crci_cmd = ADM_CMD_DST_CRCI(crci);
		row_offset = burst << 16;
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}
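
	/*
	 * Build BOX descriptors while at least one full burst remains: each
	 * box moves up to ADM_MAX_ROWS rows of 'burst' bytes, and only the
	 * memory-side address advances between boxes (the device side stays
	 * fixed at the slave address).
	 */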
	while (remainder >= burst) {
		box_desc = desc;
		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
		box_desc->row_offset = row_offset;
		box_desc->src_addr = *src;
		box_desc->dst_addr = *dst;

		rows = remainder / burst;
		rows = min_t(u32, rows, ADM_MAX_ROWS);
		box_desc->num_rows = rows << 16 | rows;
		box_desc->row_len = burst << 16 | burst;

		*incr_addr += burst * rows;
		remainder -= burst * rows;
		desc += sizeof(*box_desc);
	}

	/* if leftover bytes, do one single descriptor */
	if (remainder) {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
		single_desc->len = remainder;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		desc += sizeof(*single_desc);

		if (sg_is_last(sg))
			single_desc->cmd |= ADM_CMD_LC;
	} else {
		if (box_desc && sg_is_last(sg))
			box_desc->cmd |= ADM_CMD_LC;
	}

	return desc;
}

/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @direction: DMA transfer direction
 */
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
					    struct scatterlist *sg,
					    enum dma_transfer_direction direction)
{
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}
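
	/*
	 * No flow control: emit SINGLE descriptors, each moving at most
	 * ADM_MAX_XFER bytes, until the scatterlist entry is consumed.
	 */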
	do {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		single_desc->len = (remainder > ADM_MAX_XFER) ?
				ADM_MAX_XFER : remainder;

		remainder -= single_desc->len;
		*incr_addr += single_desc->len;
		desc += sizeof(*single_desc);
	} while (remainder);

	/* set last command if this is the end of the whole transaction */
	if (sg_is_last(sg))
		single_desc->cmd |= ADM_CMD_LC;

	return desc;
}

/**
 * adm_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_transfer_direction direction,
							  unsigned long flags,
							  void *context)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;
	struct scatterlist *sg;
	dma_addr_t cple_addr;
	u32 i, burst;
	u32 single_count = 0, box_count = 0, crci = 0;
	void *desc;
	u32 *cple;
	int blk_size = 0;

	if (!is_slave_direction(direction)) {
		dev_err(adev->dev, "invalid dma direction\n");
		return NULL;
	}

	/*
	 * get burst value from slave configuration
	 */
	burst = (direction == DMA_MEM_TO_DEV) ?
		achan->slave.dst_maxburst :
		achan->slave.src_maxburst;

	/* if using flow control, validate burst and crci values */
	if (achan->slave.device_fc) {
		blk_size = adm_get_blksize(burst);
		if (blk_size < 0) {
			dev_err(adev->dev, "invalid burst value: %d\n",
				burst);
			return ERR_PTR(-EINVAL);
		}
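
		/*
		 * slave_id carries the CRCI number in bits [3:0] and the
		 * CRCI mux selection in bit 4 (ADM_CRCI_MUX_SEL).
		 */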
		crci = achan->slave.slave_id & 0xf;
		if (!crci || achan->slave.slave_id > 0x1f) {
			dev_err(adev->dev, "invalid crci value\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* iterate through sgs and compute allocation size of structures */
	for_each_sg(sgl, sg, sg_len, i) {
		if (achan->slave.device_fc) {
			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
						  ADM_MAX_ROWS);
			if (sg_dma_len(sg) % burst)
				single_count++;
		} else {
			single_count += DIV_ROUND_UP(sg_dma_len(sg),
						     ADM_MAX_XFER);
		}
	}

	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
	if (!async_desc)
		return ERR_PTR(-ENOMEM);

	if (crci)
		async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
					ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
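	/*
	 * Account for one command pointer list entry plus enough padding so
	 * that both the CPLE and the descriptors that follow it can be
	 * 8-byte aligned inside the allocation.
	 */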
	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
				box_count * sizeof(struct adm_desc_hw_box) +
				sizeof(*cple) + 2 * ADM_DESC_ALIGN;

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl)
		goto free;

	async_desc->adev = adev;

	/* both command list entry and descriptors must be 8 byte aligned */
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->length += sg_dma_len(sg);

		if (achan->slave.device_fc)
			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
							  burst, direction);
		else
			desc = adm_process_non_fc_descriptors(achan, desc, sg,
							      direction);
	}

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(adev->dev, async_desc->dma_addr))
		goto free;

	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

	/* init cmd list */
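	/*
	 * The CPLE stores the 8-byte-aligned bus address of the descriptor
	 * list shifted right by 3 bits; ADM_CPLE_LP marks it as the last
	 * command pointer list entry.
	 */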
	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
				DMA_TO_DEVICE);
	*cple = ADM_CPLE_LP;
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
				   DMA_TO_DEVICE);

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
	kfree(async_desc->cpl);
	kfree(async_desc);
	return ERR_PTR(-ENOMEM);
}

/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions, aborts current transaction
 * No callbacks are done
 *
 */
static int adm_terminate_all(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&achan->vc.lock, flags);
	vchan_get_all_descriptors(&achan->vc, &head);

	/* send flush command to terminate current transaction */
	writel_relaxed(0x0,
		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	vchan_dma_desc_free_list(&achan->vc, &head);

	return 0;
}

static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&achan->vc.lock, flag);
	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
	spin_unlock_irqrestore(&achan->vc.lock, flag);

	return 0;
}

/**
 * adm_start_dma - start next transaction
 * @achan: ADM dma channel
 */
static void adm_start_dma(struct adm_chan *achan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;

	lockdep_assert_held(&achan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	/* write next command list out to the CMD FIFO */
	async_desc = container_of(vd, struct adm_async_desc, vd);
	achan->curr_txd = async_desc;

	/* reset channel error */
	achan->error = 0;

	if (!achan->initialized) {
		/* enable interrupts */
		writel(ADM_CH_CONF_SHADOW_EN |
		       ADM_CH_CONF_PERM_MPU_CONF |
		       ADM_CH_CONF_MPU_DISABLE |
		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
		       adev->regs + ADM_CH_CONF(achan->id));

		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		achan->initialized = 1;
	}

	/* set the crci block size if this transaction requires CRCI */
	if (async_desc->crci) {
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
	}

	/* make sure IRQ enable doesn't get reordered */
	wmb();

	/* write next command list out to the CMD FIFO */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}

/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the ADM controller
 */
static irqreturn_t adm_dma_irq(int irq, void *data)
{
	struct adm_device *adev = data;
	u32 srcs, i;
	struct adm_async_desc *async_desc;
	unsigned long flags;

	srcs = readl_relaxed(adev->regs +
			ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		struct adm_chan *achan = &adev->channels[i];
		u32 status, result;

		if (srcs & BIT(i)) {
			status = readl_relaxed(adev->regs +
					       ADM_CH_STATUS_SD(i, adev->ee));

			/* if no result present, skip */
			if (!(status & ADM_CH_STATUS_VALID))
				continue;

			result = readl_relaxed(adev->regs +
					       ADM_CH_RSLT(i, adev->ee));

			/* no valid results, skip */
			if (!(result & ADM_CH_RSLT_VALID))
				continue;

			/* flag error if transaction was flushed or failed */
			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
				achan->error = 1;

			spin_lock_irqsave(&achan->vc.lock, flags);
			async_desc = achan->curr_txd;

			achan->curr_txd = NULL;

			if (async_desc) {
				vchan_cookie_complete(&async_desc->vd);

				/* kick off next DMA */
				adm_start_dma(achan);
			}

			spin_unlock_irqrestore(&achan->vc.lock, flags);
		}
	}

	return IRQ_HANDLED;
}

/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct adm_async_desc, vd)->length;

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
	 * anything in between.
	 */
	dma_set_residue(txstate, residue);

	if (achan->error)
		return DMA_ERROR;

	return ret;
}

/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA
 */
static void adm_issue_pending(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&achan->vc.lock, flags);

	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
		adm_start_dma(achan);
	spin_unlock_irqrestore(&achan->vc.lock, flags);
}

/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);
	kfree(async_desc);
}

static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
			     u32 index)
{
	achan->id = index;
	achan->adev = adev;

	vchan_init(&achan->vc, &adev->common);
	achan->vc.desc_free = adm_dma_free_desc;
}

static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);
	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
			ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/* initialize dmaengine apis */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);

	return ret;
}

static int adm_dma_remove(struct platform_device *pdev)
{
	struct adm_device *adev = platform_get_drvdata(pdev);
	struct adm_chan *achan;
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&adev->common);

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		achan = &adev->channels[i];

		/* mask IRQs for this channel/EE pair */
		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		tasklet_kill(&adev->channels[i].vc.task);
		adm_terminate_all(&adev->channels[i].vc.chan);
	}

	devm_free_irq(adev->dev, adev->irq, adev);

	clk_disable_unprepare(adev->core_clk);
	clk_disable_unprepare(adev->iface_clk);

	return 0;
}

static const struct of_device_id adm_of_match[] = {
	{ .compatible = "qcom,adm", },
	{}
};
MODULE_DEVICE_TABLE(of, adm_of_match);

static struct platform_driver adm_dma_driver = {
	.probe = adm_dma_probe,
	.remove = adm_dma_remove,
	.driver = {
		.name = "adm-dma-engine",
		.of_match_table = adm_of_match,
	},
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");