// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations
 * while the DMAengine framework is used to perform the DMA transfers. Combined
 * this results in a device independent fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals which are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */
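
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a converter driver allocates the buffer once, typically at probe time, and
 * attaches it to its IIO device; the generic DMA buffer core then calls back
 * into the submit/abort ops defined below to move the data:
 *
 *	struct iio_buffer *buffer;
 *
 *	buffer = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
 *	...
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *
 * On the remove path the paired iio_dmaengine_buffer_free() call releases
 * the DMA channel and drops the buffer reference.
 */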

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	/* Transfer alignment and maximum segment size, filled in at alloc */
	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	/*
	 * The DMAengine completion callback may run in interrupt or tasklet
	 * context depending on the DMA driver, hence the irqsave variant.
	 */
	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(indio_dev->buffer);

	/* align is a size_t, so %zu rather than %u */
	return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct attribute *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes.dev_attr.attr,
	NULL,
};
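
/*
 * The attribute above ends up in the buffer's sysfs directory; for example
 * (device number and value illustrative):
 *
 *	$ cat /sys/bus/iio/devices/iio:device0/buffer/length_align_bytes
 *	8
 *
 * Userspace is expected to round its requested buffer length down to a
 * multiple of this value.
 */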

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Needs to be aligned to the maximum of the minimums, e.g. with
	 * supported source widths of 2 and 4 bytes and a destination width
	 * of 4 bytes the alignment becomes max(2, 4) = 4 bytes.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);
	iio_buffer_set_attrs(&dmaengine_buffer->queue.buffer,
		iio_dmaengine_buffer_attrs);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
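
/*
 * Error handling for callers (hypothetical driver code): allocation failures
 * are reported via ERR_PTR(), never NULL, so the result must be tested with
 * IS_ERR() before use:
 *
 *	buffer = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 */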

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
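
/*
 * Teardown sketch (hypothetical driver code): the free call pairs with
 * iio_dmaengine_buffer_alloc() and is normally issued from the driver's
 * remove path, after the IIO device has been unregistered:
 *
 *	iio_device_unregister(indio_dev);
 *	iio_dmaengine_buffer_free(indio_dev->buffer);
 */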

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");