/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/export.h>

#include "iio.h"
#include "ring_generic.h"
/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access->mark_in_use)
		rb->access->mark_in_use(rb);

	return 0;
}
/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access->unmark_in_use)
		rb->access->unmark_in_use(rb);

	return 0;
}
/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
					   size_t n, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;

	if (!rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_ring_poll() - poll the ring to find out if it has data
 */
static unsigned int iio_ring_poll(struct file *filp,
				  struct poll_table_struct *wait)
{
	struct iio_ring_buffer *rb = filp->private_data;

	poll_wait(filp, &rb->pollq, wait);
	/* need a way of knowing if there may be enough data... */
	return POLLIN | POLLRDNORM;
}
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_read_first_n_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.poll = iio_ring_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
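/*
 * A minimal sketch of how userspace typically consumes this chrdev
 * (illustrative only: the node name is hypothetical and the datum layout
 * depends on which scan elements are enabled):
 *
 *	int fd = open("/dev/iio:device0:buffer0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	-- blocks in iio_ring_poll()
 *	read(fd, data, n);	-- lands in iio_ring_read_first_n_outer()
 */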
void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= container_of(dev, struct iio_ring_buffer, dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}
EXPORT_SYMBOL(iio_ring_access_release);
static int
__iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
				 struct module *owner,
				 int id)
{
	int ret;

	buf->access_handler.flags = 0;
	buf->dev.bus = &iio_bus_type;
	device_initialize(&buf->dev);

	ret = iio_device_get_chrdev_minor();
	if (ret < 0)
		goto error_device_put;

	buf->dev.devt = MKDEV(MAJOR(iio_devt), ret);
	dev_set_name(&buf->dev, "%s:buffer%d",
		     dev_name(buf->dev.parent),
		     id);
	ret = device_add(&buf->dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring dev\n");
		goto error_device_put;
	}
	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;
	ret = cdev_add(&buf->access_handler.chrdev, buf->dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->dev);
error_device_put:
	put_device(&buf->dev);
	return ret;
}
static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->dev);
}
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	ring->indio_dev = dev_info;
	ring->access_handler.private = ring;
	init_waitqueue_head(&ring->pollq);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
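/*
 * A minimal sketch of the expected call from a ring implementation's
 * allocation path (struct iio_my_ring and its embedding of the buffer
 * are hypothetical; the requirement is only that iio_ring_buffer_init()
 * runs before the buffer is registered):
 *
 *	struct iio_my_ring *r = kzalloc(sizeof(*r), GFP_KERNEL);
 *	if (!r)
 *		return NULL;
 *	iio_ring_buffer_init(&r->buf, indio_dev);
 *	return &r->buf;
 */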
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	return sprintf(buf, "%c%d/%d>>%u\n",
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
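/*
 * Worked example of the format above: a channel whose scan_type is a
 * signed 12-bit value stored in 16 bits and right-shifted by 4 reads
 * back from sysfs as "s12/16>>4".
 */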
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = iio_scan_mask_query(ring, to_iio_dev_attr(attr)->address);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
{
	if (bit > IIO_MAX_SCAN_LENGTH)
		return -EINVAL;
	ring->scan_mask &= ~(1 << bit);
	ring->scan_count--;
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
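/*
 * Illustrative sysfs interaction (the attribute name is hypothetical;
 * real names come from the driver's channel definitions):
 *
 *	echo 1 > scan_elements/in_voltage0_en
 *
 * Note the -EBUSY path above: scan elements can only be selected while
 * the ring is not capturing in triggered mode.
 */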
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
				      const struct iio_chan_spec *chan)
{
	int ret;

	ret = __iio_add_chan_devattr("index", "scan_elements",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;

	ret = __iio_add_chan_devattr("type", "scan_elements",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;

	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
error_ret:
	return ret;
}
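/*
 * Net effect per channel: the scan_elements group gains _index, _type
 * and _en attributes, with the timestamp pseudo-channel using the
 * dedicated *_ts_* handlers; every attribute is tracked on
 * scan_el_dev_attr_list so __iio_ring_attr_cleanup() can undo this.
 */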
static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer *ring,
						   struct iio_dev_attr *p)
{
	sysfs_remove_file_from_group(&ring->dev.kobj,
				     &p->dev_attr.attr, "scan_elements");
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
static struct attribute *iio_scan_el_dummy_attrs[] = {
	NULL
};

static struct attribute_group iio_scan_el_dummy_group = {
	.name = "scan_elements",
	.attrs = iio_scan_el_dummy_attrs
};
static void __iio_ring_attr_cleanup(struct iio_ring_buffer *ring)
{
	struct iio_dev_attr *p, *n;
	int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);

	list_for_each_entry_safe(p, n,
				 &ring->scan_el_dev_attr_list, l)
		iio_ring_remove_and_free_scan_dev_attr(ring, p);

	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);
	else if (anydynamic)
		sysfs_remove_group(&ring->dev.kobj,
				   &iio_scan_el_dummy_group);
}
int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
				const struct iio_chan_spec *channels,
				int num_channels)
{
	int ret, i;

	ret = __iio_request_ring_buffer_chrdev(ring, ring->owner, id);
	if (ret)
		goto error_ret;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_chrdev;
		}
	} else if (channels) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 &iio_scan_el_dummy_group);
		if (ret)
			goto error_free_ring_buffer_chrdev;
	}

	INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
	if (channels)
		for (i = 0; i < num_channels; i++) {
			ret = iio_ring_add_channel_sysfs(ring, &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
		}

	return 0;
error_cleanup_dynamic:
	__iio_ring_attr_cleanup(ring);
error_free_ring_buffer_chrdev:
	__iio_free_ring_buffer_chrdev(ring);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register_ex);
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_ring_attr_cleanup(ring);
	__iio_free_ring_buffer_chrdev(ring);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
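/*
 * A minimal registration sketch from a driver's probe/remove paths
 * (my_channels is a hypothetical iio_chan_spec array; error handling
 * elided):
 *
 *	iio_ring_buffer_init(indio_dev->ring, indio_dev);
 *	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
 *					  my_channels,
 *					  ARRAY_SIZE(my_channels));
 *	...
 *	iio_ring_buffer_unregister(indio_dev->ring);
 */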
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_length)
		return sprintf(buf, "%d\n",
			       ring->access->get_length(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_length);
ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	unsigned long val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access->get_length)
		if (val == ring->access->get_length(ring))
			return len;

	if (ring->access->set_length) {
		ring->access->set_length(ring, val);
		if (ring->access->mark_param_change)
			ring->access->mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_bytes_per_datum)
		return sprintf(buf, "%d\n",
			       ring->access->get_bytes_per_datum(ring));

	return 0;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->setup_ops->preenable) {
			ret = ring->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access->request_update) {
			ret = ring->access->request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access->mark_in_use)
			ring->access->mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->setup_ops->postenable) {
			ret = ring->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->setup_ops->postdisable)
					ring->setup_ops->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->setup_ops->predisable) {
			ret = ring->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access->unmark_in_use)
			ring->access->unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->setup_ops->postdisable) {
			ret = ring->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
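/*
 * This attribute is the buffer's on/off switch from userspace; a typical
 * session (paths illustrative) selects scan elements first, then:
 *
 *	echo 1 > enable
 *	... read records from the buffer chrdev ...
 *	echo 0 > enable
 *
 * Writing the current state again is a deliberate no-op that only logs.
 */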
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled; if not, fail. */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp) {
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else
			/* Timestamp only */
			size = sizeof(s64);
	} else {
		/* Data only */
		size = ring->scan_count * ring->bpe;
	}
	ring->access->set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);
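/*
 * Worked example of the sizing above (values hypothetical): three
 * enabled 2-byte scan elements give 6 data bytes, rounded up to the
 * next s64 boundary (8), plus an 8-byte timestamp: 16 bytes per datum.
 */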