/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10

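/*
 * Illustration (not from the original source): with
 * HIDMA_AUTOSUSPEND_TIMEOUT above, the device autosuspends after 2
 * seconds of inactivity; writing e.g. 5000 to the autosuspend_delay_ms
 * file named in the comment above stretches that idle window to 5
 * seconds without a driver change.
 */
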
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

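/*
 * The channel lock is dropped before dmaengine_desc_callback_invoke()
 * runs the client callback above, so a callback is free to submit new
 * descriptors on the same channel without self-deadlocking.
 */
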
/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	bool queued = false;
	unsigned long irqflags;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

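/*
 * The conditional pm_runtime_put_autosuspend() above drops the PM
 * reference taken on the submission path (see hidma_tx_submit() and
 * hidma_issue_pending()), letting the device autosuspend once no
 * descriptor remains in flight.
 */
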
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

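/*
 * hidma_ll_start() touches hardware, so it is called only once the
 * device is known to be awake: directly when pm_runtime_get()
 * succeeds, otherwise from the hidma_issue_task() tasklet after a
 * synchronous resume.
 */
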
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

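/*
 * Illustration (values made up): with last_success = 5 and
 * last_used = 9, cookies 6..9 have not been recorded as successful by
 * hidma_process_completed(); a cookie of 7 that reports DMA_COMPLETE
 * was therefore aborted, while 3 or 5 are genuine successes. The
 * inverted test in the else branch covers the same window once the
 * 32-bit cookie counter has wrapped and last_success is numerically
 * larger than last_used.
 */
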
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

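/*
 * Only the descriptor actually occupying the engine reports
 * DMA_PAUSED; anything queued behind it stays DMA_IN_PROGRESS even
 * while the channel is paused.
 */
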
/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
				    int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return -ENOMEM;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return device_create_file(dev->ddev.dev, attrs);
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
			      "qcom-hidma", dmadev->lldev);
	if (rc)
		goto uninit;

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	platform_set_drvdata(pdev, dmadev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	tasklet_kill(&dmadev->task);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{},
};
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");