/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at runtime by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
        return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
        return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
        return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
        INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *mdma = to_hidma_dev(ddev);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t last_cookie;
        struct hidma_desc *mdesc;
        struct hidma_desc *next;
        unsigned long irqflags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* Get all completed descriptors */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* Execute callbacks and run dependencies */
        list_for_each_entry_safe(mdesc, next, &list, node) {
                enum dma_status llstat;
                struct dmaengine_desc_callback cb;
                struct dmaengine_result result;

                desc = &mdesc->desc;
                last_cookie = desc->cookie;

                spin_lock_irqsave(&mchan->lock, irqflags);
                dma_cookie_complete(desc);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
                dmaengine_desc_get_callback(desc, &cb);

                dma_run_dependencies(desc);

                spin_lock_irqsave(&mchan->lock, irqflags);
                list_move(&mdesc->node, &mchan->free);

                if (llstat == DMA_COMPLETE) {
                        mchan->last_success = last_cookie;
                        result.result = DMA_TRANS_NOERROR;
                } else
                        result.result = DMA_TRANS_ABORTED;

                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_callback_invoke(&cb, &result);
        }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
        struct hidma_desc *mdesc = data;
        struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *dmadev = to_hidma_dev(ddev);
        unsigned long irqflags;
        bool queued = false;

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (mdesc->node.next) {
                /* Delete from the active list, add to completed list */
                list_move_tail(&mdesc->node, &mchan->completed);
                queued = true;

                /* calculate the next running descriptor */
                mchan->running = list_first_entry(&mchan->active,
                                                  struct hidma_desc, node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        hidma_process_completed(mchan);

        if (queued) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
}
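
/*
 * Create one channel and hook it into the dmaengine device. The probe
 * path below calls this once per platform device, so each HIDMA instance
 * exposes a single memcpy-capable channel.
 */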
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;
        struct dma_device *ddev;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        ddev = &dmadev->ddev;
        mchan->dma_sig = dma_sig;
        mchan->dmadev = dmadev;
        mchan->chan.device = ddev;
        dma_cookie_init(&mchan->chan);

        INIT_LIST_HEAD(&mchan->free);
        INIT_LIST_HEAD(&mchan->prepared);
        INIT_LIST_HEAD(&mchan->active);
        INIT_LIST_HEAD(&mchan->completed);

        spin_lock_init(&mchan->lock);
        list_add_tail(&mchan->chan.device_node, &ddev->channels);
        dmadev->ddev.chancnt++;
        return 0;
}

static void hidma_issue_task(unsigned long arg)
{
        struct hidma_dev *dmadev = (struct hidma_dev *)arg;

        pm_runtime_get_sync(dmadev->ddev.dev);
        hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        unsigned long flags;
        int status;

        spin_lock_irqsave(&mchan->lock, flags);
        if (!mchan->running) {
                struct hidma_desc *desc = list_first_entry(&mchan->active,
                                                           struct hidma_desc,
                                                           node);
                mchan->running = desc;
        }
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* PM will be released in hidma_callback function. */
        status = pm_runtime_get(dmadev->ddev.dev);
        if (status < 0)
                tasklet_schedule(&dmadev->task);
        else
                hidma_ll_start(dmadev->lldev);
}
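
/*
 * Decide whether a cookie that dma_cookie_status() reported as complete
 * actually finished without a hardware error: anything not newer than
 * mchan->last_success (the last cookie known to have completed cleanly)
 * counts as a success. The two branches account for the dma_cookie_t
 * counter wrapping around.
 */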
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
                dma_cookie_t last_success, dma_cookie_t last_used)
{
        if (last_success <= last_used) {
                if ((cookie <= last_success) || (cookie > last_used))
                        return true;
        } else {
                if ((cookie <= last_success) && (cookie > last_used))
                        return true;
        }
        return false;
}
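
/*
 * dmaengine device_tx_status hook. On top of the generic cookie check,
 * this reports DMA_ERROR for descriptors that completed with a hardware
 * error and DMA_PAUSED for the descriptor currently held on a paused
 * channel.
 */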
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                bool is_success;

                is_success = hidma_txn_is_success(cookie, mchan->last_success,
                                                  dmach->cookie);
                return is_success ? ret : DMA_ERROR;
        }

        if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
                unsigned long flags;
                dma_cookie_t runcookie;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        runcookie = mchan->running->desc.cookie;
                else
                        runcookie = -EINVAL;

                if (runcookie == cookie)
                        ret = DMA_PAUSED;

                spin_unlock_irqrestore(&mchan->lock, flags);
        }

        return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct hidma_chan *mchan = to_hidma_chan(txd->chan);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc;
        unsigned long irqflags;
        dma_cookie_t cookie;

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (!hidma_ll_isenabled(dmadev->lldev)) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
                return -ENODEV;
        }

        mdesc = container_of(txd, struct hidma_desc, desc);
        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move descriptor to active */
        list_move_tail(&mdesc->node, &mchan->active);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);

        hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return cookie;
}
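
/*
 * Allocate the per-channel descriptor pool. The pool size comes from
 * dmadev->nr_descriptors (the "desc-count" device property, the
 * nr_desc_prm module parameter, or HIDMA_NR_DEFAULT_DESC); each
 * descriptor is bound to a lower-layer TRE channel via hidma_ll_request().
 */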
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);
        unsigned int i;
        int rc = 0;

        if (mchan->allocated)
                return 0;

        /* Alloc descriptors for this channel */
        for (i = 0; i < dmadev->nr_descriptors; i++) {
                mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
                if (!mdesc) {
                        rc = -ENOMEM;
                        break;
                }
                dma_async_tx_descriptor_init(&mdesc->desc, dmach);
                mdesc->desc.tx_submit = hidma_tx_submit;

                rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                                      "DMA engine", hidma_callback, mdesc,
                                      &mdesc->tre_ch);
                if (rc) {
                        dev_err(dmach->device->dev,
                                "channel alloc failed at %u\n", i);
                        kfree(mdesc);
                        break;
                }
                list_add_tail(&mdesc->node, &descs);
        }

        if (rc) {
                /* return the allocated descriptors */
                list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                        hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
                        kfree(mdesc);
                }
                return rc;
        }

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&descs, &mchan->free);
        mchan->allocated = true;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
        return 1;
}
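
/*
 * Prepare a memcpy descriptor: take a free descriptor from the pool,
 * program the source/destination/length, and park it on the prepared
 * list until tx_submit moves it to the active list. Returns NULL when
 * the pool is exhausted.
 */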
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     src, dest, len, flags);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}
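
/*
 * Abort everything queued on the channel: pause the hardware, complete
 * all prepared/active/completed descriptors with a NULL result, return
 * them to the free list, then re-enable the channel.
 */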
static int hidma_terminate_channel(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        struct hidma_desc *tmp, *mdesc;
        unsigned long irqflags;
        LIST_HEAD(list);
        int rc;

        pm_runtime_get_sync(dmadev->ddev.dev);
        /* give completed requests a chance to finish */
        hidma_process_completed(mchan);

        spin_lock_irqsave(&mchan->lock, irqflags);
        mchan->last_success = 0;
        list_splice_init(&mchan->active, &list);
        list_splice_init(&mchan->prepared, &list);
        list_splice_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
        rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
        }

        /* return all user requests */
        list_for_each_entry_safe(mdesc, tmp, &list, node) {
                struct dma_async_tx_descriptor *txd = &mdesc->desc;

                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
                dma_run_dependencies(txd);

                /* move myself to free_list */
                list_move(&mdesc->node, &mchan->free);
        }

        rc = hidma_ll_enable(dmadev->lldev);
out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        int rc;

        rc = hidma_terminate_channel(chan);
        if (rc)
                return rc;

        /* reinitialize the hardware */
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_setup(dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *mdma = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);

        /* terminate running transactions and free descriptors */
        hidma_terminate_channel(dmach);

        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                hidma_ll_free(mdma->lldev, mdesc->tre_ch);
                list_del(&mdesc->node);
                kfree(mdesc);
        }

        mchan->allocated = 0;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;
        int rc = 0;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
                        dev_err(dmadev->ddev.dev,
                                "failed to resume the channel");
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;

        /*
         * All interrupts are request driven.
         * HW doesn't send an interrupt by itself.
         */
        return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
        struct hidma_lldev **lldevp = arg;
        struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

        return hidma_ll_inthandler_msi(chirq, *lldevp,
                                       1 << (chirq - dmadev->msi_virqbase));
}
#endif
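
/*
 * sysfs support: a single read-only "chid" attribute reports the hardware
 * channel index read from the TRCA register space at probe time.
 */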
static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct hidma_dev *mdev = platform_get_drvdata(pdev);

        buf[0] = 0;

        if (strcmp(attr->attr.name, "chid") == 0)
                sprintf(buf, "%d\n", mdev->chidx);

        return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
        device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
        struct device_attribute *attrs;
        char *name_copy;

        attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                             GFP_KERNEL);
        if (!attrs)
                return NULL;

        name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
        if (!name_copy)
                return NULL;

        attrs->attr.name = name_copy;
        attrs->attr.mode = mode;
        attrs->show = hidma_show_values;
        sysfs_attr_init(&attrs->attr);

        return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
        dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
        if (!dev->chid_attrs)
                return -ENOMEM;

        return device_create_file(dev->ddev.dev, dev->chid_attrs);
}
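
/*
 * MSI plumbing. Only the message for MSI index 0 is programmed into the
 * EVCA register space below; how the remaining HIDMA_MSI_INTS vectors are
 * derived from that base is handled by the hardware and the lower layer
 * and is not visible in this file.
 */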
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct hidma_dev *dmadev = dev_get_drvdata(dev);

        if (!desc->platform.msi_index) {
                writel(msg->address_lo, dmadev->dev_evca + 0x118);
                writel(msg->address_hi, dmadev->dev_evca + 0x11C);
                writel(msg->data, dmadev->dev_evca + 0x120);
        }
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        struct device *dev = dmadev->ddev.dev;
        struct msi_desc *desc;

        /* free allocated MSI interrupts above */
        for_each_msi_entry(desc, dev)
                devm_free_irq(dev, desc->irq, &dmadev->lldev);

        platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
                             struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        int rc;
        struct msi_desc *desc;
        struct msi_desc *failed_desc = NULL;

        rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
                                            hidma_write_msi_msg);
        if (rc)
                return rc;

        for_each_msi_entry(desc, &pdev->dev) {
                if (!desc->platform.msi_index)
                        dmadev->msi_virqbase = desc->irq;

                rc = devm_request_irq(&pdev->dev, desc->irq,
                                      hidma_chirq_handler_msi,
                                      0, "qcom-hidma-msi",
                                      &dmadev->lldev);
                if (rc) {
                        failed_desc = desc;
                        break;
                }
        }

        if (rc) {
                /* free allocated MSI interrupts above */
                for_each_msi_entry(desc, &pdev->dev) {
                        if (desc == failed_desc)
                                break;
                        devm_free_irq(&pdev->dev, desc->irq,
                                      &dmadev->lldev);
                }
        } else {
                /* Add callback to free MSIs on teardown */
                hidma_ll_setup_irq(dmadev->lldev, true);
        }

        if (rc)
                dev_warn(&pdev->dev,
                         "failed to request MSI irq, falling back to wired IRQ\n");
        return rc;
#else
        return -EINVAL;
#endif
}
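
/*
 * MSI capability is keyed off the device identity: the "qcom,hidma-1.1"
 * compatible string on DT platforms or the QCOM8062 ACPI HID. Older
 * hardware (hidma-1.0 / QCOM8061) only supports the wired channel IRQ.
 */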
static bool hidma_msi_capable(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        const char *of_compat;
        int ret = -EINVAL;

        if (!adev || acpi_disabled) {
                ret = device_property_read_string(dev, "compatible",
                                                  &of_compat);
                if (ret)
                        return false;

                ret = strcmp(of_compat, "qcom,hidma-1.1");
        } else {
#ifdef CONFIG_ACPI
                ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
        }
        return ret == 0;
}
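
/*
 * Probe order: enable runtime PM, map the TRCA and EVCA register regions,
 * pick up the channel IRQ, initialize the lower layer, then try MSI and
 * fall back to the wired IRQ before registering the dmaengine device.
 */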
static int hidma_probe(struct platform_device *pdev)
{
        struct hidma_dev *dmadev;
        struct resource *trca_resource;
        struct resource *evca_resource;
        int chirq;
        void __iomem *evca;
        void __iomem *trca;
        int rc;
        bool msi;

        pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        trca = devm_ioremap_resource(&pdev->dev, trca_resource);
        if (IS_ERR(trca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        evca = devm_ioremap_resource(&pdev->dev, evca_resource);
        if (IS_ERR(evca)) {
                rc = -ENOMEM;
                goto bailout;
        }

        /*
         * This driver only handles the channel IRQs.
         * Common IRQ is handled by the management driver.
         */
        chirq = platform_get_irq(pdev, 0);
        if (chirq < 0) {
                rc = -ENODEV;
                goto bailout;
        }

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev) {
                rc = -ENOMEM;
                goto bailout;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        spin_lock_init(&dmadev->lock);
        dmadev->ddev.dev = &pdev->dev;
        pm_runtime_get_sync(dmadev->ddev.dev);

        dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
        if (WARN_ON(!pdev->dev.dma_mask)) {
                rc = -ENXIO;
                goto dmafree;
        }

        dmadev->dev_evca = evca;
        dmadev->evca_resource = evca_resource;
        dmadev->dev_trca = trca;
        dmadev->trca_resource = trca_resource;
        dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
        dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
        dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
        dmadev->ddev.device_tx_status = hidma_tx_status;
        dmadev->ddev.device_issue_pending = hidma_issue_pending;
        dmadev->ddev.device_pause = hidma_pause;
        dmadev->ddev.device_resume = hidma_resume;
        dmadev->ddev.device_terminate_all = hidma_terminate_all;
        dmadev->ddev.copy_align = 8;

        /*
         * Determine the MSI capability of the platform. Old HW doesn't
         * support MSI.
         */
        msi = hidma_msi_capable(&pdev->dev);

        device_property_read_u32(&pdev->dev, "desc-count",
                                 &dmadev->nr_descriptors);

        if (!dmadev->nr_descriptors && nr_desc_prm)
                dmadev->nr_descriptors = nr_desc_prm;

        if (!dmadev->nr_descriptors)
                dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

        dmadev->chidx = readl(dmadev->dev_trca + 0x28);

        /* Set DMA mask to 64 bits. */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc)
                        goto dmafree;
        }

        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                                      dmadev->nr_descriptors, dmadev->dev_trca,
                                      dmadev->dev_evca, dmadev->chidx);
        if (!dmadev->lldev) {
                rc = -EPROBE_DEFER;
                goto dmafree;
        }

        platform_set_drvdata(pdev, dmadev);
        if (msi)
                rc = hidma_request_msi(dmadev, pdev);

        if (!msi || rc) {
                hidma_ll_setup_irq(dmadev->lldev, false);
                rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
                                      0, "qcom-hidma", dmadev->lldev);
                if (rc)
                        goto uninit;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        rc = hidma_chan_init(dmadev, 0);
        if (rc)
                goto uninit;

        rc = dma_async_device_register(&dmadev->ddev);
        if (rc)
                goto uninit;

        dmadev->irq = chirq;
        tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
        hidma_debug_init(dmadev);
        hidma_sysfs_init(dmadev);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;

uninit:
        if (msi)
                hidma_free_msis(dmadev);

        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
dmafree:
        if (dmadev)
                hidma_free(dmadev);
bailout:
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return rc;
}
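
/*
 * Teardown mirrors probe: unregister the dmaengine device, release the
 * wired IRQ or the MSIs, stop the tasklet, remove the sysfs and debugfs
 * entries, and shut down the lower layer before disabling runtime PM.
 */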
static int hidma_remove(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        if (!dmadev->lldev->msi_support)
                devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
        else
                hidma_free_msis(dmadev);

        tasklet_kill(&dmadev->task);
        hidma_sysfs_uninit(dmadev);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);

        dev_info(&pdev->dev, "HI-DMA engine removed\n");
        pm_runtime_put_sync_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
        {"QCOM8061"},
        {"QCOM8062"},
        {},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {.compatible = "qcom,hidma-1.1",},
        {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
        .probe = hidma_probe,
        .remove = hidma_remove,
        .driver = {
                .name = "hidma",
                .of_match_table = hidma_match,
                .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
        },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");