// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations
 * while the DMAengine framework is used to perform the DMA transfers. Combined
 * this results in a device independent fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals which are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */
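
/*
 * Data flow: a peripheral driver obtains the buffer via
 * devm_iio_dmaengine_buffer_alloc(), the generic DMA buffer core hands
 * individual blocks to iio_dmaengine_buffer_submit_block(), which maps them
 * onto DMAengine slave transfers, and iio_dmaengine_buffer_block_done()
 * returns each completed block to the core once the DMA controller signals
 * completion.
 */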

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}
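
/*
 * Completion callback for a single block. It runs in the DMA driver's
 * completion context, removes the block from the active list, accounts for
 * any residue reported by the controller and hands the block back to the
 * generic IIO DMA buffer layer.
 */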
static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}
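
/*
 * Queue one block for transfer. The transfer length is clamped to the DMA
 * device's maximum segment size and rounded down to the alignment derived
 * from the controller's address widths, so the constraints reported by the
 * DMAengine driver are respected.
 */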
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}
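
/*
 * Called when the buffer is disabled or torn down: terminate all in-flight
 * transfers on the channel and abort the blocks still sitting on the active
 * list so they are returned to the queue.
 */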
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}
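
/*
 * The standard buffer operations are all delegated to the generic IIO DMA
 * buffer implementation; only release() needs a DMAengine specific hook so
 * that the wrapping struct dmaengine_buffer is freed as well.
 */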
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);

	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}
static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_free;

	/* Needs to be aligned to the maximum of the minimums */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}

static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
{
	iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
}

/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * The buffer will be automatically de-allocated once the device gets destroyed.
 */
struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct iio_buffer **bufferp, *buffer;

	bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
			       sizeof(*bufferp), GFP_KERNEL);
	if (!bufferp)
		return ERR_PTR(-ENOMEM);

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer)) {
		devres_free(bufferp);
		return buffer;
	}

	*bufferp = buffer;
	devres_add(dev, bufferp);

	return buffer;
}
EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
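
/*
 * Minimal usage sketch (illustrative only, not part of this driver): a
 * converter driver would typically allocate the buffer against its own
 * struct device during probe and attach it to its IIO device. "indio_dev"
 * and the "rx" channel name are placeholders for the caller's own setup:
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = devm_iio_dmaengine_buffer_alloc(dev, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);
 */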

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");