// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher samplerates than what can be
 * sustained with the one sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked as to be freed. It might still
 *    be owned by either the application or the DMA controller at the moment,
 *    but once it has been processed the block will be freed instead of being
 *    put on the incoming or outgoing queue.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block (a sketch of this contract follows this comment).
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
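
/*
 * Example (illustrative sketch, not part of this module): the driver-side
 * contract described above. struct my_dma_dev, to_my_dma_dev(), my_dma_start()
 * and my_dma_completed_block() are hypothetical placeholders; only the
 * iio_dma_buffer_* calls and the iio_dma_buffer_ops layout are taken from the
 * real API.
 *
 *	static int my_dma_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct my_dma_dev *dma = to_my_dma_dev(queue);
 *
 *		// Start a transfer into the block's DMA-coherent memory.
 *		return my_dma_start(dma, block->phys_addr, block->size, block);
 *	}
 *
 *	static irqreturn_t my_dma_irq(int irq, void *data)
 *	{
 *		struct iio_dma_buffer_block *block = my_dma_completed_block(data);
 *
 *		// bytes_used must be a multiple of bytes_per_datum; it is 0 if
 *		// no transfer was actually performed for this block.
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 *	static const struct iio_dma_buffer_ops my_dma_buffer_ops = {
 *		.submit = my_dma_submit,
 *		.abort = my_dma_abort,	// see iio_dma_buffer_block_list_abort()
 *	};
 */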
static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
		block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);

	kfree(block);
}
static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}
static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}
/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);
static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}
/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}
static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}
static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}
static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * The buffer has already been freed by the application, just drop the
	 * reference.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}
/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}
/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}
/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will allocate the DMA buffers and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}
static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}
/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for iio_buffer_access_funcs
 * struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");