1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
7 #ifndef __INDUSTRIALIO_DMA_BUFFER_H__
8 #define __INDUSTRIALIO_DMA_BUFFER_H__
10 #include <linux/atomic.h>
11 #include <linux/list.h>
12 #include <linux/kref.h>
13 #include <linux/spinlock.h>
14 #include <linux/mutex.h>
15 #include <linux/iio/buffer_impl.h>
/* Forward declarations: only pointers to these are used in this header. */
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device; /* needed by iio_dma_buffer_init()'s dma_dev parameter */

struct dma_buf_attachment;
/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
39 * struct iio_dma_buffer_block - IIO buffer block
41 * @size: Total size of the block in bytes
42 * @bytes_used: Number of bytes that contain valid data
43 * @vaddr: Virutal address of the blocks memory
44 * @phys_addr: Physical address of the blocks memory
45 * @queue: Parent DMA buffer queue
46 * @kref: kref used to manage the lifetime of block
47 * @state: Current state of the block
48 * @cyclic: True if this is a cyclic buffer
49 * @fileio: True if this buffer is used for fileio mode
50 * @sg_table: DMA table for the transfer when transferring a DMABUF
51 * @fence: DMA fence to be signaled when a DMABUF transfer is complete
53 struct iio_dma_buffer_block
{
54 /* May only be accessed by the owner of the block */
55 struct list_head head
;
59 * Set during allocation, constant thereafter. May be accessed read-only
60 * by anybody holding a reference to the block.
65 struct iio_dma_buffer_queue
*queue
;
67 /* Must not be accessed outside the core. */
70 * Must not be accessed outside the core. Access needs to hold
71 * queue->list_lock if the block is not owned by the core.
73 enum iio_block_state state
;
78 struct sg_table
*sg_table
;
79 struct dma_fence
*fence
;
83 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
84 * @blocks: Buffer blocks used for fileio
85 * @active_block: Block being used in read()
86 * @pos: Read offset in the active block
87 * @block_size: Size of each block
88 * @next_dequeue: index of next block that will be dequeued
89 * @enabled: Whether the buffer is operating in fileio mode
91 struct iio_dma_buffer_queue_fileio
{
92 struct iio_dma_buffer_block
*blocks
[2];
93 struct iio_dma_buffer_block
*active_block
;
97 unsigned int next_dequeue
;
102 * struct iio_dma_buffer_queue - DMA buffer base structure
103 * @buffer: IIO buffer base structure
104 * @dev: Parent device
105 * @ops: DMA buffer callbacks
106 * @lock: Protects the incoming list, active and the fields in the fileio
108 * @list_lock: Protects lists that contain blocks which can be modified in
109 * atomic context as well as blocks on those lists. This is the outgoing queue
110 * list and typically also a list of active blocks in the part that handles
112 * @incoming: List of buffers on the incoming queue
113 * @active: Whether the buffer is currently active
114 * @num_dmabufs: Total number of DMABUFs attached to this queue
115 * @fileio: FileIO state
117 struct iio_dma_buffer_queue
{
118 struct iio_buffer buffer
;
120 const struct iio_dma_buffer_ops
*ops
;
123 spinlock_t list_lock
;
124 struct list_head incoming
;
127 atomic_t num_dmabufs
;
129 struct iio_dma_buffer_queue_fileio fileio
;
/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
143 void iio_dma_buffer_block_done(struct iio_dma_buffer_block
*block
);
144 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue
*queue
,
145 struct list_head
*list
);
147 int iio_dma_buffer_enable(struct iio_buffer
*buffer
,
148 struct iio_dev
*indio_dev
);
149 int iio_dma_buffer_disable(struct iio_buffer
*buffer
,
150 struct iio_dev
*indio_dev
);
151 int iio_dma_buffer_read(struct iio_buffer
*buffer
, size_t n
,
152 char __user
*user_buffer
);
153 int iio_dma_buffer_write(struct iio_buffer
*buffer
, size_t n
,
154 const char __user
*user_buffer
);
155 size_t iio_dma_buffer_usage(struct iio_buffer
*buffer
);
156 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer
*buffer
, size_t bpd
);
157 int iio_dma_buffer_set_length(struct iio_buffer
*buffer
, unsigned int length
);
158 int iio_dma_buffer_request_update(struct iio_buffer
*buffer
);
160 int iio_dma_buffer_init(struct iio_dma_buffer_queue
*queue
,
161 struct device
*dma_dev
, const struct iio_dma_buffer_ops
*ops
);
162 void iio_dma_buffer_exit(struct iio_dma_buffer_queue
*queue
);
163 void iio_dma_buffer_release(struct iio_dma_buffer_queue
*queue
);
165 struct iio_dma_buffer_block
*
166 iio_dma_buffer_attach_dmabuf(struct iio_buffer
*buffer
,
167 struct dma_buf_attachment
*attach
);
168 void iio_dma_buffer_detach_dmabuf(struct iio_buffer
*buffer
,
169 struct iio_dma_buffer_block
*block
);
170 int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer
*buffer
,
171 struct iio_dma_buffer_block
*block
,
172 struct dma_fence
*fence
,
173 struct sg_table
*sgt
,
174 size_t size
, bool cyclic
);
175 void iio_dma_buffer_lock_queue(struct iio_buffer
*buffer
);
176 void iio_dma_buffer_unlock_queue(struct iio_buffer
*buffer
);