// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
14 static struct virt_dma_desc
*to_virt_desc(struct dma_async_tx_descriptor
*tx
)
16 return container_of(tx
, struct virt_dma_desc
, tx
);
19 dma_cookie_t
vchan_tx_submit(struct dma_async_tx_descriptor
*tx
)
21 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
22 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
26 spin_lock_irqsave(&vc
->lock
, flags
);
27 cookie
= dma_cookie_assign(tx
);
29 list_move_tail(&vd
->node
, &vc
->desc_submitted
);
30 spin_unlock_irqrestore(&vc
->lock
, flags
);
32 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: submitted\n",
37 EXPORT_SYMBOL_GPL(vchan_tx_submit
);
40 * vchan_tx_desc_free - free a reusable descriptor
43 * This function frees a previously allocated reusable descriptor. The only
44 * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
47 * Returns 0 upon success
49 int vchan_tx_desc_free(struct dma_async_tx_descriptor
*tx
)
51 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
52 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
55 spin_lock_irqsave(&vc
->lock
, flags
);
57 spin_unlock_irqrestore(&vc
->lock
, flags
);
59 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: freeing\n",
60 vc
, vd
, vd
->tx
.cookie
);
64 EXPORT_SYMBOL_GPL(vchan_tx_desc_free
);
66 struct virt_dma_desc
*vchan_find_desc(struct virt_dma_chan
*vc
,
69 struct virt_dma_desc
*vd
;
71 list_for_each_entry(vd
, &vc
->desc_issued
, node
)
72 if (vd
->tx
.cookie
== cookie
)
77 EXPORT_SYMBOL_GPL(vchan_find_desc
);
80 * This tasklet handles the completion of a DMA descriptor by
81 * calling its callback and freeing it.
83 static void vchan_complete(struct tasklet_struct
*t
)
85 struct virt_dma_chan
*vc
= from_tasklet(vc
, t
, task
);
86 struct virt_dma_desc
*vd
, *_vd
;
87 struct dmaengine_desc_callback cb
;
90 spin_lock_irq(&vc
->lock
);
91 list_splice_tail_init(&vc
->desc_completed
, &head
);
95 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
97 memset(&cb
, 0, sizeof(cb
));
99 spin_unlock_irq(&vc
->lock
);
101 dmaengine_desc_callback_invoke(&cb
, &vd
->tx_result
);
103 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
104 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
107 dmaengine_desc_callback_invoke(&cb
, &vd
->tx_result
);
108 vchan_vdesc_fini(vd
);
112 void vchan_dma_desc_free_list(struct virt_dma_chan
*vc
, struct list_head
*head
)
114 struct virt_dma_desc
*vd
, *_vd
;
116 list_for_each_entry_safe(vd
, _vd
, head
, node
) {
118 vchan_vdesc_fini(vd
);
121 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list
);
123 void vchan_init(struct virt_dma_chan
*vc
, struct dma_device
*dmadev
)
125 dma_cookie_init(&vc
->chan
);
127 spin_lock_init(&vc
->lock
);
128 INIT_LIST_HEAD(&vc
->desc_allocated
);
129 INIT_LIST_HEAD(&vc
->desc_submitted
);
130 INIT_LIST_HEAD(&vc
->desc_issued
);
131 INIT_LIST_HEAD(&vc
->desc_completed
);
132 INIT_LIST_HEAD(&vc
->desc_terminated
);
134 tasklet_setup(&vc
->task
, vchan_complete
);
136 vc
->chan
.device
= dmadev
;
137 list_add_tail(&vc
->chan
.device_node
, &dmadev
->channels
);
139 EXPORT_SYMBOL_GPL(vchan_init
);
141 MODULE_AUTHOR("Russell King");
142 MODULE_LICENSE("GPL");