/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than what can be
 * sustained with the one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by
 * means of two queues: the incoming queue and the outgoing queue. Blocks on
 * the incoming queue are waiting for the DMA controller to pick them up and
 * fill them with data. Blocks on the outgoing queue have been filled with data
 * and are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment. But
 *    once the current owner is done processing it, instead of going to either
 *    the incoming or outgoing queue, the block will be freed.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
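
/*
 * As a rough sketch (not part of this module; the foo_* names below are
 * hypothetical), a driver using this infrastructure might wire things up
 * roughly like this:
 *
 *	static int foo_buffer_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		// Program the DMA controller to write up to block->size bytes
 *		// into block->phys_addr. The transfer-complete handler sets
 *		// block->bytes_used and then calls iio_dma_buffer_block_done().
 *		return foo_hw_start_transfer(block);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_buffer_submit,
 *		.abort = foo_buffer_abort,
 *	};
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read_first_n = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */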

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
		block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * The buffer has already been freed by the application, just drop the
	 * reference.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will start the DMA transfers for all blocks on the incoming queue.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read_first_n callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
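
/*
 * A minimal usage sketch (the foo_* and st-> names are hypothetical, not part
 * of this module): a driver typically embeds a struct iio_dma_buffer_queue in
 * its state, initializes it from probe with the device that performs the DMA,
 * and attaches the queue's buffer to the IIO device:
 *
 *	ret = iio_dma_buffer_init(&st->queue, &pdev->dev, &foo_dma_buffer_ops);
 *	if (ret)
 *		return ret;
 *	iio_device_attach_buffer(indio_dev, &st->queue.buffer);
 *	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 */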

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");