/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
17 static struct virt_dma_desc
*to_virt_desc(struct dma_async_tx_descriptor
*tx
)
19 return container_of(tx
, struct virt_dma_desc
, tx
);
22 dma_cookie_t
vchan_tx_submit(struct dma_async_tx_descriptor
*tx
)
24 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
25 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
29 spin_lock_irqsave(&vc
->lock
, flags
);
30 cookie
= dma_cookie_assign(tx
);
32 list_move_tail(&vd
->node
, &vc
->desc_submitted
);
33 spin_unlock_irqrestore(&vc
->lock
, flags
);
35 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: submitted\n",
40 EXPORT_SYMBOL_GPL(vchan_tx_submit
);
43 * vchan_tx_desc_free - free a reusable descriptor
46 * This function frees a previously allocated reusable descriptor. The only
47 * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
50 * Returns 0 upon success
52 int vchan_tx_desc_free(struct dma_async_tx_descriptor
*tx
)
54 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
55 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
58 spin_lock_irqsave(&vc
->lock
, flags
);
60 spin_unlock_irqrestore(&vc
->lock
, flags
);
62 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: freeing\n",
63 vc
, vd
, vd
->tx
.cookie
);
67 EXPORT_SYMBOL_GPL(vchan_tx_desc_free
);
69 struct virt_dma_desc
*vchan_find_desc(struct virt_dma_chan
*vc
,
72 struct virt_dma_desc
*vd
;
74 list_for_each_entry(vd
, &vc
->desc_issued
, node
)
75 if (vd
->tx
.cookie
== cookie
)
80 EXPORT_SYMBOL_GPL(vchan_find_desc
);
83 * This tasklet handles the completion of a DMA descriptor by
84 * calling its callback and freeing it.
86 static void vchan_complete(unsigned long arg
)
88 struct virt_dma_chan
*vc
= (struct virt_dma_chan
*)arg
;
89 struct virt_dma_desc
*vd
, *_vd
;
90 struct dmaengine_desc_callback cb
;
93 spin_lock_irq(&vc
->lock
);
94 list_splice_tail_init(&vc
->desc_completed
, &head
);
98 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
100 memset(&cb
, 0, sizeof(cb
));
102 spin_unlock_irq(&vc
->lock
);
104 dmaengine_desc_callback_invoke(&cb
, NULL
);
106 list_for_each_entry_safe(vd
, _vd
, &head
, node
) {
107 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
110 vchan_vdesc_fini(vd
);
112 dmaengine_desc_callback_invoke(&cb
, NULL
);
116 void vchan_dma_desc_free_list(struct virt_dma_chan
*vc
, struct list_head
*head
)
118 struct virt_dma_desc
*vd
, *_vd
;
120 list_for_each_entry_safe(vd
, _vd
, head
, node
) {
121 if (dmaengine_desc_test_reuse(&vd
->tx
)) {
122 list_move_tail(&vd
->node
, &vc
->desc_allocated
);
124 dev_dbg(vc
->chan
.device
->dev
, "txd %p: freeing\n", vd
);
130 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list
);
132 void vchan_init(struct virt_dma_chan
*vc
, struct dma_device
*dmadev
)
134 dma_cookie_init(&vc
->chan
);
136 spin_lock_init(&vc
->lock
);
137 INIT_LIST_HEAD(&vc
->desc_allocated
);
138 INIT_LIST_HEAD(&vc
->desc_submitted
);
139 INIT_LIST_HEAD(&vc
->desc_issued
);
140 INIT_LIST_HEAD(&vc
->desc_completed
);
142 tasklet_init(&vc
->task
, vchan_complete
, (unsigned long)vc
);
144 vc
->chan
.device
= dmadev
;
145 list_add_tail(&vc
->chan
.device_node
, &dmadev
->channels
);
147 EXPORT_SYMBOL_GPL(vchan_init
);
/* Module metadata. */
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");