/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers. Combined,
 * this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller with a DMAengine driver implementation.
 */

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

/* DMA completion callback: retire the finished block. */
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Cap the transfer at the maximum segment size of the DMA device and
	 * round it down to the bus-width alignment of the channel.
	 */
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_slave_channel_reason(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_free;

	/* Needs to be aligned to the maximum of the minimums */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
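
/*
 * Usage sketch (not part of this driver): a converter driver would typically
 * allocate the buffer at probe time and attach it to its IIO device. The
 * indio_dev pointer and the "rx" channel name below are illustrative
 * assumptions, not requirements of this API.
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_dmaengine_buffer_alloc(indio_dev->dev.parent, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);
 */
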
/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
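
/*
 * Teardown sketch (illustrative, mirroring the allocation example above): the
 * driver releases the buffer in its remove path, after unregistering the IIO
 * device so that no further accesses can reach the buffer.
 *
 *	iio_device_unregister(indio_dev);
 *	iio_dmaengine_buffer_free(indio_dev->buffer);
 */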