/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"
/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}
static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}
static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}
static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}
static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry(mdesc, &list, node) {
		enum dma_status llstat;

		desc = &mdesc->desc;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		if (desc->callback && (llstat == DMA_COMPLETE))
			desc->callback(desc->callback_param);

		last_cookie = desc->cookie;
		dma_run_dependencies(desc);
	}

	/* Free descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&list, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	/* Drop the PM reference taken when this descriptor was queued. */
	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}
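
/*
 * Allocate a channel context, initialize its descriptor lists and add it
 * to the DMA device's channel list.
 */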
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}
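
/*
 * Pick the first active descriptor as the running one if nothing is running
 * yet, then start the hardware. If taking the runtime PM reference fails
 * here, the start is retried from hidma_issue_task() via the tasklet.
 */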
static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}
/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}
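
/*
 * Allocate the per-channel descriptor pool and reserve a hardware TRE for
 * each descriptor through the lower-level (hidma_ll) layer.
 */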
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_pause(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;
		dma_async_tx_callback callback = mdesc->desc.callback;
		void *param = mdesc->desc.callback_param;

		dma_descriptor_unmap(txd);

		if (callback)
			callback(param);

		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_resume(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_pause(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}
static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_resume(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}
static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}
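
/*
 * Map the TRCA and EVCA register regions, request the channel interrupt,
 * initialize the lower-level driver and register the DMA engine device.
 */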
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
			      "qcom-hidma", dmadev->lldev);
	if (rc)
		goto uninit;

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	platform_set_drvdata(pdev, dmadev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{},
};
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{},
};

MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");