/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/export.h>

#include "iio.h"
#include "iio_core.h"
#include "ring_generic.h"

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access->mark_in_use)
		rb->access->mark_in_use(rb);

	return 0;
}

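/*
 * Illustrative sketch (hypothetical struct, not part of this file's API):
 * treating hand->private as a struct iio_ring_buffer pointer above is only
 * safe because implementations embed the core structure as their first
 * member, along the lines of:
 *
 *	struct my_sw_ring {
 *		struct iio_ring_buffer buf;	<- must be the first member
 *		unsigned char *data;		<- implementation storage
 *	};
 */
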
/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access->unmark_in_use)
		rb->access->unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
					   size_t n, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;

	if (!rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_ring_poll() - poll the ring to find out if it has data
 **/
static unsigned int iio_ring_poll(struct file *filp,
				  struct poll_table_struct *wait)
{
	struct iio_ring_buffer *rb = filp->private_data;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

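/*
 * Usage sketch (userspace; the node name depends on the parent device's
 * name, something like /dev/iio:device0:buffer0 given the dev_set_name()
 * call below): a reader can block until stufftoread is flagged:
 *
 *	struct pollfd pfd = {
 *		.fd = fd,		<- open()ed buffer chrdev
 *		.events = POLLIN,
 *	};
 *	poll(&pfd, 1, -1);		<- woken via pollq
 */
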
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_read_first_n_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.poll = iio_ring_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= container_of(dev, struct iio_ring_buffer, dev);

	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}
EXPORT_SYMBOL(iio_ring_access_release);

static inline int
__iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
				 struct module *owner,
				 int id)
{
	int ret;

	buf->access_handler.flags = 0;
	buf->dev.bus = &iio_bus_type;
	device_initialize(&buf->dev);

	ret = iio_device_get_chrdev_minor();
	if (ret < 0)
		goto error_device_put;

	buf->dev.devt = MKDEV(MAJOR(iio_devt), ret);
	dev_set_name(&buf->dev, "%s:buffer%d",
		     dev_name(buf->dev.parent),
		     id);
	ret = device_add(&buf->dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring dev\n");
		goto error_device_put;
	}
	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;
	ret = cdev_add(&buf->access_handler.chrdev, buf->dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->dev);
error_device_put:
	put_device(&buf->dev);

	return ret;
}

static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	ring->indio_dev = dev_info;
	ring->access_handler.private = ring;
	init_waitqueue_head(&ring->pollq);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

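/*
 * Expected call sequence (sketch; my_ring_allocate is a hypothetical
 * implementation helper): a driver wires the ring to its iio_dev before
 * registering it:
 *
 *	indio_dev->ring = my_ring_allocate(indio_dev);
 *	iio_ring_buffer_init(indio_dev->ring, indio_dev);
 */
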
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	return sprintf(buf, "%c%d/%d>>%u\n",
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

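/*
 * The format produced above is <sign><realbits>/<storagebits>>><shift>.
 * For example (illustrative values), a signed 12-bit sample stored in a
 * 16-bit word and shifted right by 4 reads back as "s12/16>>4".
 */
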
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = iio_scan_mask_query(ring, to_iio_dev_attr(attr)->address);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
{
	if (bit > IIO_MAX_SCAN_LENGTH)
		return -EINVAL;
	ring->scan_mask &= ~(1 << bit);
	ring->scan_count--;
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

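/*
 * Usage sketch (userspace; attribute names are derived from the channel
 * spec, so the example name is an assumption): scan elements are toggled
 * while the buffer is not capturing, e.g.
 *
 *	echo 1 > scan_elements/in_voltage0_en
 *
 * Writes return -EBUSY while the device is in INDIO_RING_TRIGGERED mode.
 */
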
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	bool state;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret ? ret : len;
}

static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
				      const struct iio_chan_spec *chan)
{
	int ret;

	ret = __iio_add_chan_devattr("index", "scan_elements",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;

	ret = __iio_add_chan_devattr("type", "scan_elements",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;

	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
error_ret:
	return ret;
}

static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer *ring,
						   struct iio_dev_attr *p)
{
	sysfs_remove_file_from_group(&ring->dev.kobj,
				     &p->dev_attr.attr, "scan_elements");
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static struct attribute *iio_scan_el_dummy_attrs[] = {
	NULL
};

static struct attribute_group iio_scan_el_dummy_group = {
	.name = "scan_elements",
	.attrs = iio_scan_el_dummy_attrs
};

static void __iio_ring_attr_cleanup(struct iio_ring_buffer *ring)
{
	struct iio_dev_attr *p, *n;
	int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);

	list_for_each_entry_safe(p, n,
				 &ring->scan_el_dev_attr_list, l)
		iio_ring_remove_and_free_scan_dev_attr(ring, p);

	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);
	else if (anydynamic)
		sysfs_remove_group(&ring->dev.kobj,
				   &iio_scan_el_dummy_group);
}

int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
				const struct iio_chan_spec *channels,
				int num_channels)
{
	int ret, i;

	ret = __iio_request_ring_buffer_chrdev(ring, ring->owner, id);
	if (ret)
		goto error_ret;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_chrdev;
		}
	} else if (channels) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 &iio_scan_el_dummy_group);
		if (ret)
			goto error_free_ring_buffer_chrdev;
	}

	INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			ret = iio_ring_add_channel_sysfs(ring, &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
		}
	}

	return 0;
error_cleanup_dynamic:
	__iio_ring_attr_cleanup(ring);
error_free_ring_buffer_chrdev:
	__iio_free_ring_buffer_chrdev(ring);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register_ex);

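/*
 * Registration sketch (my_channels is a hypothetical iio_chan_spec table):
 * a caller typically registers at probe time and pairs this with
 * iio_ring_buffer_unregister() on removal:
 *
 *	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
 *					  my_channels,
 *					  ARRAY_SIZE(my_channels));
 */
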
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_ring_attr_cleanup(ring);
	__iio_free_ring_buffer_chrdev(ring);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_length)
		return sprintf(buf, "%d\n",
			       ring->access->get_length(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access->get_length)
		if (val == ring->access->get_length(ring))
			return len;

	if (ring->access->set_length) {
		ring->access->set_length(ring, val);
		if (ring->access->mark_param_change)
			ring->access->mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_bytes_per_datum)
		return sprintf(buf, "%d\n",
			       ring->access->get_bytes_per_datum(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->setup_ops->preenable) {
			ret = ring->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access->request_update) {
			ret = ring->access->request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access->mark_in_use)
			ring->access->mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		} else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->setup_ops->postenable) {
			ret = ring->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->setup_ops->postdisable)
					ring->setup_ops->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->setup_ops->predisable) {
			ret = ring->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access->unmark_in_use)
			ring->access->unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->setup_ops->postdisable) {
			ret = ring->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

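/*
 * Usage sketch (userspace; assumes the "length" and "enable" attributes
 * that drivers commonly expose for this staging ABI): the ring is sized
 * while stopped and then switched on:
 *
 *	echo 128 > length	<- handled by iio_write_ring_length()
 *	echo 1 > enable		<- handled by iio_store_ring_enable()
 */
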
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled, if not fail */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
	ring->access->set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);
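
/*
 * Worked example for the sizing above (illustrative values): with
 * scan_count = 3 and bpe = 2, the payload is 6 bytes, which is rounded
 * up to 8 for s64 alignment; adding the timestamp gives an 8 + 8 =
 * 16 byte datum.  Without the timestamp the datum is simply 3 * 2 = 6
 * bytes.
 */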