/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                        int event_code,
                        s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                                event_code,
                                timestamp,
                                &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
                                     int event_code,
                                     s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                           event_code,
                                           timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
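
/*
 * Illustrative sketch, not part of the original file: a ring
 * implementation that has just crossed a watermark could notify
 * userspace through the helpers above.  The escalate variant upgrades
 * an event still sitting unread in the queue rather than queueing a
 * second one.  The event code is assumed to be one of the ring event
 * codes from ring_generic.h; the ring pointer and timestamp are
 * assumed to come from the caller.
 *
 *	static void example_watermark_hit(struct iio_ring_buffer *ring,
 *					  s64 timestamp)
 *	{
 *		iio_push_or_escallate_ring_event(ring,
 *						 IIO_EVENT_CODE_RING_75_FULL,
 *						 timestamp);
 *	}
 */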

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
                                  size_t count, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset, copied;
        u8 *data;
        /* rip lots must exist. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

        if (copied < 0) {
                ret = copied;
                goto error_ret;
        }
        if (copy_to_user(buf, data + dead_offset, copied)) {
                ret = -EFAULT;
                goto error_free_data_cpy;
        }
        /* In clever ring buffer designs this may not need to be freed.
         * When such a design exists I'll add this to ring access funcs.
         */
        kfree(data);

        return copied;

error_free_data_cpy:
        kfree(data);
error_ret:
        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
};
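
/*
 * Illustrative sketch, not part of the original file: userspace pulls
 * raw scan data out of the ring through the access chrdev these fops
 * back.  The node name follows the "%s:access%d" pattern used below;
 * the exact /dev path is an assumption depending on udev rules.
 *
 *	int fd = open("/dev/device0:access0", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * A short read returns however many bytes rip_lots handed back;
 * -EINVAL means the ring implementation provides no rip_lots callback.
 */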

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf: ring buffer whose event chrdev we are allocating
 * @id: id of this ring buffer's event chrdev
 * @owner: the module who owns the ring buffer (for ref counting)
 * @dev: device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        int ret;

        buf->ev_int.id = id;

        snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
                 "%s:event%d",
                 dev_name(&buf->dev),
                 buf->ev_int.id);
        ret = iio_setup_ev_int(&(buf->ev_int),
                               buf->ev_int._name,
                               owner,
                               dev);
        if (ret)
                goto error_ret;
        return 0;

error_ret:
        return ret;
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.bus = &iio_bus_type;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        buf->access_id = id;

        dev_set_name(&buf->access_dev, "%s:access%d",
                     dev_name(&buf->dev),
                     buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_device_put;
        }

        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_unregister;
        }
        return 0;

error_device_unregister:
        device_unregister(&buf->access_dev);
error_device_put:
        put_device(&buf->access_dev);

        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
        ring->shared_ev_pointer.ev_p = NULL;
        spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
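
/*
 * Illustrative sketch, not part of the original file: a driver brings a
 * ring up by allocating it with an implementation-specific allocator
 * (example_allocate_ring() is hypothetical), initialising it against
 * its iio_dev, then registering it.
 *
 *	ring = example_allocate_ring(indio_dev);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring, 0);
 *	if (ret)
 *		goto error_free_ring;
 *
 * Teardown reverses this via iio_ring_buffer_unregister().
 */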

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
        int ret;

        ring->id = id;

        dev_set_name(&ring->dev, "%s:buffer%d",
                     dev_name(ring->dev.parent),
                     ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_ret;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);

        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        return ret;
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
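
/*
 * Illustrative usage, not part of the original file: drivers typically
 * expose the pair above as a "length" sysfs attribute on the buffer
 * device, so the ring can be resized before a capture run.  The
 * attribute name and path are assumptions:
 *
 *	$ echo 128 > /sys/class/.../device0:buffer0/length
 *	$ cat /sys/class/.../device0:buffer0/length
 *	128
 *
 * Note the silent-success path: if the requested length already
 * matches get_length(), set_length() and mark_param_change() are
 * skipped.
 */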

ssize_t iio_read_ring_bps(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bpd)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bpd(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);

ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
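
/*
 * Illustrative usage, not part of the original file: wired up as a
 * sysfs attribute (the name "ring_enable" is an assumption), the store
 * above starts and stops capture from userspace:
 *
 *	$ echo 1 > /sys/class/.../device0:buffer0/ring_enable
 *	$ echo 0 > /sys/class/.../device0:buffer0/ring_enable
 *
 * Writing 1 runs preenable, refreshes ring parameters, selects
 * triggered or hardware-buffer mode, then runs postenable; writing 0
 * runs predisable, drops back to INDIO_DIRECT_MODE and runs
 * postdisable.  A postenable failure restores the previous mode.
 */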

ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count--;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, this_el->number);
                if (ret)
                        goto error_ret;
                indio_dev->scan_count++;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
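
/*
 * Illustrative usage, not part of the original file: each scan element
 * gets its own attribute (resolved via to_iio_scan_el()), and writing
 * it adds the element to or removes it from the scan mask, adjusting
 * scan_count to match.  The attribute name "scan_en_accel_x" is
 * hypothetical:
 *
 *	$ echo 1 > /sys/class/.../device0/scan_en_accel_x
 *	$ cat /sys/class/.../device0/scan_en_accel_x
 *	1
 *
 * While the device is in INDIO_RING_TRIGGERED mode the write fails
 * with -EBUSY, so elements must be chosen before enabling the ring.
 */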

ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;
        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);