/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>

#include "ring_sw.h"
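
/* Design note: this buffer is "minimally locked".  A single producer (the
 * driver's data-ready path) advances write_p and last_written_p, while
 * readers chase read_p; use_lock only protects the resize bookkeeping
 * (use_count and update_needed), never the data path itself.
 */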

/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf:		generic ring buffer elements
 * @data:		the ring buffer memory
 * @read_p:		read pointer (oldest available)
 * @write_p:		write pointer
 * @last_written_p:	read pointer (newest available)
 * @half_p:		half buffer length behind write_p (event generation)
 * @use_count:		reference count to prevent resizing when in use
 * @update_needed:	flag to indicate a change in size has been requested
 * @use_lock:		lock to prevent change in size when in use
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_ring_buffer.
 */
struct iio_sw_ring_buffer {
	struct iio_ring_buffer buf;
	unsigned char *data;
	unsigned char *read_p;
	unsigned char *write_p;
	unsigned char *last_written_p;
	/* used to act as a point at which to signal an event */
	unsigned char *half_p;
	int use_count;
	int update_needed;
	spinlock_t use_lock;
};
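
/* The generic ring buffer must be the first member (see note above), which
 * lets container_of() recover the software ring from the struct
 * iio_ring_buffer pointer the core hands around.
 */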
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

static void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}

static void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
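
/* mark/unmark bracket every user of the ring buffer; iio_request_update_sw_rb
 * below refuses to reallocate the backing store while use_count is non-zero,
 * so users never see it change underneath them.
 */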

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data
			- ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier(); /* sample must be written before the pointers publishing it */
	/* Update the pointer used to get most recent value.
	 * Always valid as either points to latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is full.
		 * Any change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return 0;
}
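
/* Read the first (oldest) n bytes from the ring into userspace.  The copy
 * goes via a kmalloc'd bounce buffer because the interrupt-side writer may
 * push read_p forward while we copy; anything it overwrote during the copy is
 * discarded afterwards via dead_offset rather than prevented by locking.
 */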
static int iio_read_first_n_sw_rb(struct iio_ring_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 */
	if (n % ring->buf.bytes_per_datum) {
		printk(KERN_INFO
		       "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		return -EINVAL;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
			   n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = ring->buf.length*ring->buf.bytes_per_datum
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}
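
	/* Illustrative example (hypothetical numbers): with an 8-datum ring of
	 * 4-byte samples, a request rips 16 bytes starting at initial_read_p.
	 * If the writer overruns and pushes read_p on by one datum while we
	 * copy, dead_offset works out to 4, so only max_copied - dead_offset
	 * = 12 bytes (taken from data + dead_offset) reach userspace.
	 */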

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it alone.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = max_copied - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = false;

error_free_data_cpy:
	kfree(data);

	return ret;
}

static int iio_store_to_sw_rb(struct iio_ring_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* drop the in-use count taken above before bailing out */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

	/* the writer may have moved on mid-copy; if so, try again */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

static int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
				    unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}

static int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}

static int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}

static int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		if (r->access->mark_param_change)
			r->access->mark_param_change(r);
	}
	return 0;
}

static int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}

static int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access->mark_param_change)
			r->access->mark_param_change(r);
	}
	return 0;
}

static int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	iio_ring_access_release(&r->dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BYTES_PER_DATUM_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bytes_per_datum.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->update_needed = true;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	buf->dev.parent = &indio_dev->dev;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

const struct iio_ring_access_funcs ring_sw_access_funcs = {
	.mark_in_use = &iio_mark_sw_rb_in_use,
	.unmark_in_use = &iio_unmark_sw_rb_in_use,
	.store_to = &iio_store_to_sw_rb,
	.read_last = &iio_read_last_from_sw_rb,
	.read_first_n = &iio_read_first_n_sw_rb,
	.mark_param_change = &iio_mark_update_needed_sw_rb,
	.request_update = &iio_request_update_sw_rb,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
	.get_length = &iio_get_length_sw_rb,
	.set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");