drivers/staging/iio/ring_sw.c
/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"
/**
 * struct iio_sw_ring_buffer - software ring buffer
 * @buf: generic ring buffer elements
 * @data: the ring buffer memory
 * @read_p: read pointer (oldest available)
 * @write_p: write pointer
 * @half_p: half buffer length behind write_p (event generation)
 * @update_needed: flag to indicate a requested change in size
 *
 * Note that the first element of all ring buffers must be a
 * struct iio_buffer.
 **/
struct iio_sw_ring_buffer {
	struct iio_buffer buf;
	unsigned char *data;
	unsigned char *read_p;
	unsigned char *write_p;
	/* used to act as a point at which to signal an event */
	unsigned char *half_p;
	int update_needed;
};

#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
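
/*
 * The access functions below are only ever handed a struct iio_buffer * by
 * the core; because 'buf' is embedded (and must come first) in
 * struct iio_sw_ring_buffer, iio_to_sw_ring() recovers the containing ring
 * via container_of().  A minimal sketch of the pattern (hypothetical helper,
 * not part of this driver):
 *
 *	static size_t example_capacity(struct iio_buffer *r)
 *	{
 *		struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
 *
 *		return ring->buf.length * ring->buf.bytes_per_datum;
 *	}
 */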
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;
	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it either points to the latest or second latest value.
	 * Before this runs it is NULL and read attempts fail with -EAGAIN.
	 */
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer:
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer
	 * always valid as long as this is the only function able to write.
	 * Care needed with SMP systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is full.
		 * Any change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}
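
/*
 * Worked example of the half_p event barrier above (values chosen purely for
 * illustration): with length = 8 and bytes_per_datum = 2, the first store
 * sets write_p = data and half_p = data - 8 bytes, and every store then
 * advances half_p by one datum.  If read_p is still at data after the fourth
 * store, half_p == read_p, so stufftoread is set and pollq is woken - i.e.
 * poll()/read() waiters are signalled once the ring is roughly half full.
 */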
static int iio_read_first_n_sw_rb(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;
	size_t data_available, buffer_size;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}
	buffer_size = ring->buf.bytes_per_datum*ring->buf.length;

	/* Limit size to whole of ring buffer */
	bytes_to_rip = min_t(size_t, buffer_size, n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}
	if (initial_write_p >= initial_read_p)
		data_available = initial_write_p - initial_read_p;
	else
		data_available = buffer_size - (initial_read_p - initial_write_p);

	if (data_available < bytes_to_rip)
		bytes_to_rip = data_available;

	if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
		max_copied = ring->data + buffer_size - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
		end_read_p = ring->data + bytes_to_rip - max_copied;
	} else {
		memcpy(data, initial_read_p, bytes_to_rip);
		end_read_p = initial_read_p + bytes_to_rip;
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = buffer_size - (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (bytes_to_rip - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}
	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = bytes_to_rip - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = 0;

error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}
static int iio_store_to_sw_rb(struct iio_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	if (!ring->update_needed)
		goto error_ret;
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	return ret;
}
static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}
static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		iio_mark_update_needed_sw_rb(r);
	}
	return 0;
}
static int iio_get_length_sw_rb(struct iio_buffer *r)
{
	return r->length;
}
static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		iio_mark_update_needed_sw_rb(r);
	}
	return 0;
}
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
	.name = "buffer",
};
struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->update_needed = true;
	buf = &ring->buf;
	iio_buffer_init(buf);
	buf->attrs = &iio_ring_attribute_group;

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);
void iio_sw_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_sw_ring(r));
}
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
	.store_to = &iio_store_to_sw_rb,
	.read_first_n = &iio_read_first_n_sw_rb,
	.request_update = &iio_request_update_sw_rb,
	.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
	.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
	.get_length = &iio_get_length_sw_rb,
	.set_length = &iio_set_length_sw_rb,
};
EXPORT_SYMBOL(ring_sw_access_funcs);

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");
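
/*
 * Typical driver-side hookup of the exported symbols above, as a sketch only:
 * the probe/error handling around it and the exact iio_dev fields used
 * (indio_dev->buffer, buffer->access) are assumptions about the IIO core of
 * this era rather than anything defined in this file.
 *
 *	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->buffer)
 *		return -ENOMEM;
 *	indio_dev->buffer->access = &ring_sw_access_funcs;
 *	...
 *	iio_sw_rb_free(indio_dev->buffer);
 */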