/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"
/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		/* invoke the client callback without holding the channel lock */
		dmaengine_desc_callback_invoke(&cb, &result);
	}
}
/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}
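
/*
 * Tasklet body: take a runtime PM reference and start the hardware.
 * Scheduled from hidma_issue_pending() when pm_runtime_get() cannot
 * resume the device synchronously.
 */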
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}
static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}
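
/*
 * Note on the check below: cookies increase monotonically and may wrap.
 * Cookies inside the half-open window (last_success, last_used] belong to
 * transactions that are still pending or were aborted; a cookie outside
 * that window is presumed to have completed successfully. The two branches
 * cover the unwrapped and wrapped layouts of that window.
 */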
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}
/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}
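
/*
 * Descriptor pool setup: each channel preallocates nr_descriptors software
 * descriptors, and each one is bound to a hardware TRE via hidma_ll_request()
 * with hidma_callback() as its completion handler.
 */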
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}
static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}
static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}
static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}
static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}
static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}
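
/*
 * MSI support below is compiled in only when the kernel provides a generic
 * MSI IRQ domain; without it, hidma_request_msi() returns -EINVAL and the
 * probe path falls back to the wired channel interrupt.
 */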
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif
static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}
static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (enum hidma_cap) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}
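
/*
 * Probe order: enable runtime PM, map the TRCA and EVCA register regions,
 * read the channel index, bring up the low-level driver, then request
 * either MSI or the wired channel IRQ before registering with dmaengine.
 */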
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}
static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
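
/*
 * Capability data per device ID: QCOM8061 has no flags (wired IRQ only),
 * QCOM8062 advertises MSI support, and QCOM8063 additionally provides the
 * identity register that hidma_probe() reads for the channel index.
 */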
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");