staging:iio:buffering remove unused parameter dead_offset from read_last_n in all...
[zen-stable.git] / drivers / staging / iio / ring_sw.c
/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/uaccess.h>	/* copy_to_user() in iio_read_first_n_sw_rb() */
#include "ring_sw.h"
#include "trigger.h"

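/* Design notes (summary of the code below):
 * - A single writer, iio_store_to_sw_ring(), advances write_p and
 *   last_written_p and, when the buffer fills, pushes read_p one datum
 *   ahead of itself.
 * - Readers copy out a snapshot first and only then reconcile with read_p,
 *   discarding whatever the writer may have overwritten in the meantime.
 * - use_lock protects only use_count, which in turn stops
 *   iio_request_update_sw_rb() from reallocating the ring while it is in use.
 */
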
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
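	/* GFP_ATOMIC as this may be called with ring->use_lock held, see
	 * iio_request_update_sw_rb() below. */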
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
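/* Within this file iio_sw_trigger_bh_to_ring() below is one such caller; it
 * gets here via the ring->access.store_to() op, which drivers using this
 * buffer normally point at iio_store_to_sw_rb(). */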
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to where ever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get most recent value.
	 * Always valid as either points to latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer
	 * it may be slightly lagging, but never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving pointer on one because the ring is full. Any
		 * change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}

int iio_read_first_n_sw_rb(struct iio_ring_buffer *r,
			   size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}

	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
			   n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to cpy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = ring->buf.length*ring->buf.bytes_per_datum
			- (initial_read_p - current_read_p);
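	/* Worked example: length = 4, bytes_per_datum = 8, so the ring is 32
	 * bytes.  If the copy above started with read_p at offset 24 and the
	 * writer has since pushed read_p round to offset 8, then
	 * dead_offset = 32 - (24 - 8) = 16: the first 16 bytes of the local
	 * copy may have been overwritten mid-copy and are discarded below. */
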
	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = max_copied - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = 0;

error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}
EXPORT_SYMBOL(iio_read_first_n_sw_rb);

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}
EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb);

int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	iio_ring_access_release(&r->dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BYTES_PER_DATUM_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bytes_per_datum.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	buf->dev.parent = &indio_dev->dev;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled, if not fail */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
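	/* e.g. scan_count = 3, bpe = 2: 6 data bytes round up to 8 for s64
	 * alignment, plus an 8 byte timestamp, giving a 16 byte datum. */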
	ring->access.set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);

void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *st
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	struct iio_ring_buffer *ring = st->indio_dev->ring;
	int len = 0;
	size_t datasize = ring->access.get_bytes_per_datum(ring);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(st->indio_dev->dev.parent,
			"memory alloc failed in ring bh\n");
		return;
	}

	if (ring->scan_count)
		len = st->get_ring_element(st, data);

	/* Guaranteed to be aligned with 8 byte boundary */
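	/* (data + len) is rounded up to the next multiple of sizeof(s64),
	 * e.g. len = 6 rounds up to offset 8, so the s64 store below is
	 * naturally aligned. */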
	if (ring->scan_timestamp)
		*(s64 *)(((phys_addr_t)data + len
			  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= st->last_timestamp;
	ring->access.store_to(ring,
			      (u8 *)data,
			      st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(data);

	return;
}
EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);

void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	h->last_timestamp = time;
	schedule_work(&h->work_trigger_to_ring);
}
EXPORT_SYMBOL(iio_sw_poll_func_th);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");