/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "ring_sw.h"
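
/*
 * Overview of the pointers used below (fields of struct iio_sw_ring_buffer):
 *
 *   data            - kmalloc'd storage of buf.length * buf.bpd bytes.
 *   write_p         - where the producer will place the next sample.
 *   last_written_p  - most recently completed sample; NULL until the first
 *                     store, after which "read latest" requests succeed.
 *   read_p          - oldest sample not yet consumed; pushed forward by the
 *                     producer when the ring overflows.
 *   half_p          - trails the write pointer by half the ring and is used
 *                     to signal the 50%-full watermark event.
 *   use_lock / use_count - guard against freeing or resizing the storage
 *                     while readers are marked as using the ring.
 */
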
static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
					    int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->use_lock = __SPIN_LOCK_UNLOCKED((ring)->use_lock);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = 0;
	ring->write_p = 0;
	ring->last_written_p = 0;
	ring->half_p = 0;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}
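
/* Readers mark the ring as in use so that a pending resize (see
 * iio_request_update_sw_rb()) is refused until they have finished. */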
void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
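/*
 * Illustrative sketch only (not compiled): a device driver would typically
 * call the iio_store_to_sw_rb() wrapper from its data-ready bottom half.
 * The driver state structure and sample-fetch helper below are hypothetical.
 *
 *	static void example_poll_bh(struct example_state *st)
 *	{
 *		u8 sample[EXAMPLE_BYTES_PER_DATUM];
 *		s64 timestamp = st->last_timestamp;
 *
 *		example_read_sample(st, sample);
 *		iio_store_to_sw_rb(st->ring, sample, timestamp);
 *	}
 */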
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	if (unlikely(ring->write_p == 0)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get most recent value.
	 * Always valid as either points to latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled concurrently.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == 0)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * the ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
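	/* half_p trails the write pointer by half the ring; when it catches
	 * up with the read pointer roughly half of the buffer is unread, so
	 * the 50%-full watermark event is pushed below. */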
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
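
/* Read ("rip") a chunk of data from the ring.
 *
 * Allocates *data, copies up to 'count' bytes (at most one whole ring) into
 * it and advances the read pointer.  Because the producer may push the read
 * pointer while the copy is in progress, the first *dead_offset bytes of the
 * copy may already have been overwritten; the return value is the number of
 * valid bytes, which start at *data + *dead_offset.
 */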
int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;

	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == 0)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it where it is.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
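
/*
 * Illustrative sketch only (not compiled): how a consumer such as a chrdev
 * read path might use iio_rip_sw_rb().  The surrounding read function and
 * its destination buffer handling are hypothetical.
 *
 *	int example_read(struct iio_ring_buffer *ring, char __user *buf,
 *			 size_t count)
 *	{
 *		u8 *data;
 *		int dead_offset;
 *		int copied = iio_rip_sw_rb(ring, count, &data, &dead_offset);
 *
 *		if (copied <= 0)
 *			return copied;
 *		if (copy_to_user(buf, data + dead_offset, copied))
 *			copied = -EFAULT;
 *		kfree(data);
 *		return copied;
 *	}
 */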

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);
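
/* Return a copy of the most recent sample.
 *
 * The copy is retried if the producer updates last_written_p while the
 * memcpy is in progress, so the result is always a complete sample even
 * though no lock is taken against the writer.
 */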
int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == 0) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
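
/* Resize/reallocation protocol: parameter changes only mark the ring as
 * needing an update (see iio_mark_update_needed_sw_rb()); the actual free
 * and reallocation happen here, and are refused with -EAGAIN while any
 * reader has marked the ring as in use. */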
int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);
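
/* Device-model plumbing: each software ring is exposed as a device carrying
 * the attributes declared below; the release callback frees the containing
 * iio_sw_ring_buffer once the last reference to that device is dropped. */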
static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;

	iio_ring_buffer_init(buf, indio_dev);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
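
/*
 * Illustrative sketch only (not compiled): typical lifecycle of a software
 * ring from a driver's point of view.  Error handling is omitted and the
 * surrounding driver code is hypothetical.
 *
 *	ring = iio_sw_rb_allocate(indio_dev);
 *	iio_set_bpd_sw_rb(ring, bytes_per_datum);
 *	iio_set_length_sw_rb(ring, nr_samples);
 *	iio_mark_update_needed_sw_rb(ring);
 *	iio_request_update_sw_rb(ring);			(allocates the storage)
 *	...
 *	iio_store_to_sw_rb(ring, sample, timestamp);	(producer side)
 *	...
 *	iio_sw_rb_free(ring);
 */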

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");