/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/device.h>
11 #include <linux/dmaengine.h>
12 #include <linux/module.h>
13 #include <linux/spinlock.h>
17 static struct virt_dma_desc
*to_virt_desc(struct dma_async_tx_descriptor
*tx
)
19 return container_of(tx
, struct virt_dma_desc
, tx
);
22 dma_cookie_t
vchan_tx_submit(struct dma_async_tx_descriptor
*tx
)
24 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
25 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
29 spin_lock_irqsave(&vc
->lock
, flags
);
30 cookie
= dma_cookie_assign(tx
);
32 list_move_tail(&vd
->node
, &vc
->desc_submitted
);
33 spin_unlock_irqrestore(&vc
->lock
, flags
);
35 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: submitted\n",
40 EXPORT_SYMBOL_GPL(vchan_tx_submit
);
43 * vchan_tx_desc_free - free a reusable descriptor
46 * This function frees a previously allocated reusable descriptor. The only
47 * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
50 * Returns 0 upon success
52 int vchan_tx_desc_free(struct dma_async_tx_descriptor
*tx
)
54 struct virt_dma_chan
*vc
= to_virt_chan(tx
->chan
);
55 struct virt_dma_desc
*vd
= to_virt_desc(tx
);
58 spin_lock_irqsave(&vc
->lock
, flags
);
60 spin_unlock_irqrestore(&vc
->lock
, flags
);
62 dev_dbg(vc
->chan
.device
->dev
, "vchan %p: txd %p[%x]: freeing\n",
63 vc
, vd
, vd
->tx
.cookie
);
67 EXPORT_SYMBOL_GPL(vchan_tx_desc_free
);
69 struct virt_dma_desc
*vchan_find_desc(struct virt_dma_chan
*vc
,
72 struct virt_dma_desc
*vd
;
74 list_for_each_entry(vd
, &vc
->desc_issued
, node
)
75 if (vd
->tx
.cookie
== cookie
)
80 EXPORT_SYMBOL_GPL(vchan_find_desc
);
83 * This tasklet handles the completion of a DMA descriptor by
84 * calling its callback and freeing it.
86 static void vchan_complete(unsigned long arg
)
88 struct virt_dma_chan
*vc
= (struct virt_dma_chan
*)arg
;
89 struct virt_dma_desc
*vd
;
90 struct dmaengine_desc_callback cb
;
93 spin_lock_irq(&vc
->lock
);
94 list_splice_tail_init(&vc
->desc_completed
, &head
);
98 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
100 memset(&cb
, 0, sizeof(cb
));
102 spin_unlock_irq(&vc
->lock
);
104 dmaengine_desc_callback_invoke(&cb
, NULL
);
106 while (!list_empty(&head
)) {
107 vd
= list_first_entry(&head
, struct virt_dma_desc
, node
);
108 dmaengine_desc_get_callback(&vd
->tx
, &cb
);
111 if (dmaengine_desc_test_reuse(&vd
->tx
))
112 list_add(&vd
->node
, &vc
->desc_allocated
);
116 dmaengine_desc_callback_invoke(&cb
, NULL
);
120 void vchan_dma_desc_free_list(struct virt_dma_chan
*vc
, struct list_head
*head
)
122 while (!list_empty(head
)) {
123 struct virt_dma_desc
*vd
= list_first_entry(head
,
124 struct virt_dma_desc
, node
);
125 if (dmaengine_desc_test_reuse(&vd
->tx
)) {
126 list_move_tail(&vd
->node
, &vc
->desc_allocated
);
128 dev_dbg(vc
->chan
.device
->dev
, "txd %p: freeing\n", vd
);
134 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list
);
136 void vchan_init(struct virt_dma_chan
*vc
, struct dma_device
*dmadev
)
138 dma_cookie_init(&vc
->chan
);
140 spin_lock_init(&vc
->lock
);
141 INIT_LIST_HEAD(&vc
->desc_allocated
);
142 INIT_LIST_HEAD(&vc
->desc_submitted
);
143 INIT_LIST_HEAD(&vc
->desc_issued
);
144 INIT_LIST_HEAD(&vc
->desc_completed
);
146 tasklet_init(&vc
->task
, vchan_complete
, (unsigned long)vc
);
148 vc
->chan
.device
= dmadev
;
149 list_add_tail(&vc
->chan
.device_node
, &dmadev
->channels
);
151 EXPORT_SYMBOL_GPL(vchan_init
);
/* Module metadata; license matches the GPL v2 notice in the file header. */
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");