/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "ring_generic.h"
/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);

        return 0;
}
/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);

        return 0;
}
/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
                                           size_t n, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;

        if (!rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_ring_poll() - poll the ring to find out if it has data
 */
static unsigned int iio_ring_poll(struct file *filp,
                                  struct poll_table_struct *wait)
{
        struct iio_ring_buffer *rb = filp->private_data;

        poll_wait(filp, &rb->pollq, wait);
        return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
}
static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_read_first_n_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .poll = iio_ring_poll,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};
void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = container_of(dev, struct iio_ring_buffer, dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}
EXPORT_SYMBOL(iio_ring_access_release);
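/**
 * __iio_request_ring_buffer_chrdev() - set up the chrdev for a ring buffer
 * @buf: ring buffer being exposed to userspace
 * @owner: module owning the chrdev
 * @id: index used to name the device node as "<parent>:buffer<id>"
 *
 * Allocates a minor number, adds the device and registers the character
 * device through which iio_ring_fileops is reached.
 **/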
static int
__iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
                                 struct module *owner,
                                 int id)
{
        int ret;

        buf->access_handler.flags = 0;
        buf->dev.bus = &iio_bus_type;
        device_initialize(&buf->dev);

        ret = iio_device_get_chrdev_minor();
        if (ret < 0)
                goto error_device_put;

        buf->dev.devt = MKDEV(MAJOR(iio_devt), ret);
        dev_set_name(&buf->dev, "%s:buffer%d",
                     dev_name(buf->dev.parent),
                     id);
        ret = device_add(&buf->dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring dev\n");
                goto error_device_put;
        }
        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;
        ret = cdev_add(&buf->access_handler.chrdev, buf->dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring chrdev\n");
                goto error_device_unregister;
        }
        return 0;

error_device_unregister:
        device_unregister(&buf->dev);
error_device_put:
        put_device(&buf->dev);

        return ret;
}
static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->dev);
}
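/**
 * iio_ring_buffer_init() - initialize the fields common to all ring buffers
 * @ring: ring buffer to initialize
 * @dev_info: iio device the buffer is attached to
 *
 * Must run before the buffer is registered; it points the access handler
 * back at the buffer and prepares the poll wait queue.
 **/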
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        ring->indio_dev = dev_info;
        ring->access_handler.private = ring;
        init_waitqueue_head(&ring->pollq);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
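/*
 * The scan_elements sysfs group describes each channel that may be
 * captured in a scan: its position in the scan ("index"), its storage
 * layout ("type", formatted as [s|u]realbits/storagebits>>shift) and
 * whether it is currently enabled ("en").
 */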
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        return sprintf(buf, "%c%d/%d>>%u\n",
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(ring, to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_ring_buffer *ring, int bit)
{
        if (bit > IIO_MAX_SCAN_LENGTH)
                return -EINVAL;
        ring->scan_mask &= ~(1 << bit);
        ring->scan_count--;
        return 0;
}
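/*
 * Writing '0' drops the channel from the scan mask, any other value adds
 * it. The mask cannot change while the device is in triggered ring mode,
 * as the scan layout is then in active use.
 */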
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(ring, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(ring, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(ring, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", ring->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ring->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
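/*
 * Create the per-channel scan_elements attributes. Normal channels get
 * index/type/en entries backed by the scan mask; the timestamp channel
 * instead gets an "en" entry backed by ring->scan_timestamp.
 */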
static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
                                      const struct iio_chan_spec *chan)
{
        int ret;

        ret = __iio_add_chan_devattr("index", "scan_elements",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &ring->dev,
                                     &ring->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;

        ret = __iio_add_chan_devattr("type", "scan_elements",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &ring->dev,
                                     &ring->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;

        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en", "scan_elements",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &ring->dev,
                                             &ring->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en", "scan_elements",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &ring->dev,
                                             &ring->scan_el_dev_attr_list);
error_ret:
        return ret;
}
static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer *ring,
                                                   struct iio_dev_attr *p)
{
        sysfs_remove_file_from_group(&ring->dev.kobj,
                                     &p->dev_attr.attr, "scan_elements");
        kfree(p->dev_attr.attr.name);
        kfree(p);
}
static struct attribute *iio_scan_el_dummy_attrs[] = {
        NULL
};

static struct attribute_group iio_scan_el_dummy_group = {
        .name = "scan_elements",
        .attrs = iio_scan_el_dummy_attrs
};
static void __iio_ring_attr_cleanup(struct iio_ring_buffer *ring)
{
        struct iio_dev_attr *p, *n;
        int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);
        list_for_each_entry_safe(p, n,
                                 &ring->scan_el_dev_attr_list, l)
                iio_ring_remove_and_free_scan_dev_attr(ring, p);

        if (ring->scan_el_attrs)
                sysfs_remove_group(&ring->dev.kobj,
                                   ring->scan_el_attrs);
        else if (anydynamic)
                sysfs_remove_group(&ring->dev.kobj,
                                   &iio_scan_el_dummy_group);
}
int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
                                const struct iio_chan_spec *channels,
                                int num_channels)
{
        int ret, i;

        ret = __iio_request_ring_buffer_chrdev(ring, ring->owner, id);
        if (ret)
                goto error_ret;

        if (ring->scan_el_attrs) {
                ret = sysfs_create_group(&ring->dev.kobj,
                                         ring->scan_el_attrs);
                if (ret) {
                        dev_err(&ring->dev,
                                "Failed to add sysfs scan elements\n");
                        goto error_free_ring_buffer_chrdev;
                }
        } else if (channels) {
                ret = sysfs_create_group(&ring->dev.kobj,
                                         &iio_scan_el_dummy_group);
                if (ret)
                        goto error_free_ring_buffer_chrdev;
        }

        INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
        if (channels) {
                for (i = 0; i < num_channels; i++) {
                        ret = iio_ring_add_channel_sysfs(ring, &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                }
        }

        return 0;
error_cleanup_dynamic:
        __iio_ring_attr_cleanup(ring);
error_free_ring_buffer_chrdev:
        __iio_free_ring_buffer_chrdev(ring);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register_ex);
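/*
 * Typical use from a driver (illustrative sketch only; "chans" is a
 * hypothetical channel array, error handling omitted):
 *
 *      iio_ring_buffer_init(indio_dev->ring, indio_dev);
 *      ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
 *                                        chans, ARRAY_SIZE(chans));
 *      ...
 *      iio_ring_buffer_unregister(indio_dev->ring);
 */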
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        __iio_ring_attr_cleanup(ring);
        __iio_free_ring_buffer_chrdev(ring);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access->get_length)
                return sprintf(buf, "%d\n",
                               ring->access->get_length(ring));

        return 0;
}
EXPORT_SYMBOL(iio_read_ring_length);
ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access->get_length)
                if (val == ring->access->get_length(ring))
                        return len;

        if (ring->access->set_length) {
                ring->access->set_length(ring, val);
                if (ring->access->mark_param_change)
                        ring->access->mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               ring->access->get_bytes_per_datum(ring));

        return 0;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
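/*
 * Enabling runs preenable -> request_update -> mark_in_use -> mode
 * switch -> postenable; disabling runs predisable -> unmark_in_use ->
 * postdisable. A failed postenable rolls the mode back and calls
 * postdisable so the device is left in a consistent state.
 */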
ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->setup_ops->preenable) {
                        ret = ring->setup_ops->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: "
                                       "ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access->request_update) {
                        ret = ring->access->request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access->mark_in_use)
                        ring->access->mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access->unmark_in_use)
                                        ring->access->unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (ring->setup_ops->postenable) {
                        ret = ring->setup_ops->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: "
                                       "postenable failed\n");
                                if (ring->access->unmark_in_use)
                                        ring->access->unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->setup_ops->postdisable)
                                        ring->setup_ops->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->setup_ops->predisable) {
                        ret = ring->setup_ops->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access->unmark_in_use)
                        ring->access->unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->setup_ops->postdisable) {
                        ret = ring->setup_ops->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
        struct iio_ring_buffer *ring = indio_dev->ring;
        size_t size;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Check if there are any scan elements enabled, if not fail */
        if (!(ring->scan_count || ring->scan_timestamp))
                return -EINVAL;
        if (ring->scan_timestamp)
                if (ring->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((ring->scan_count * ring->bpe)
                                 + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else /* Timestamp only */
                        size = sizeof(s64);
        else /* Data only */
                size = ring->scan_count * ring->bpe;
        ring->access->set_bytes_per_datum(ring, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);
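/*
 * Example of the size computation above: three enabled 2-byte channels
 * plus the timestamp give 3 * 2 = 6 data bytes, padded to 8 for s64
 * alignment, plus 8 bytes of timestamp = 16 bytes per datum.
 */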