/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
16 #include <linux/kernel.h>
17 #include <linux/device.h>
19 #include <linux/cdev.h>
20 #include <linux/slab.h>
21 #include <linux/poll.h>
22 #include <linux/export.h>
25 #include "ring_generic.h"
28 * iio_ring_open() - chrdev file open for ring buffer access
30 * This function relies on all ring buffer implementations having an
31 * iio_ring_buffer as their first element.
33 static int iio_ring_open(struct inode
*inode
, struct file
*filp
)
35 struct iio_handler
*hand
36 = container_of(inode
->i_cdev
, struct iio_handler
, chrdev
);
37 struct iio_ring_buffer
*rb
= hand
->private;
39 filp
->private_data
= hand
->private;
40 if (rb
->access
->mark_in_use
)
41 rb
->access
->mark_in_use(rb
);
47 * iio_ring_release() - chrdev file close ring buffer access
49 * This function relies on all ring buffer implementations having an
50 * iio_ring_buffer as their first element.
52 static int iio_ring_release(struct inode
*inode
, struct file
*filp
)
54 struct cdev
*cd
= inode
->i_cdev
;
55 struct iio_handler
*hand
= iio_cdev_to_handler(cd
);
56 struct iio_ring_buffer
*rb
= hand
->private;
58 clear_bit(IIO_BUSY_BIT_POS
, &rb
->access_handler
.flags
);
59 if (rb
->access
->unmark_in_use
)
60 rb
->access
->unmark_in_use(rb
);
66 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
68 * This function relies on all ring buffer implementations having an
69 * iio_ring _bufer as their first element.
71 static ssize_t
iio_ring_read_first_n_outer(struct file
*filp
, char __user
*buf
,
72 size_t n
, loff_t
*f_ps
)
74 struct iio_ring_buffer
*rb
= filp
->private_data
;
76 if (!rb
->access
->read_first_n
)
78 return rb
->access
->read_first_n(rb
, n
, buf
);
82 * iio_ring_poll() - poll the ring to find out if it has data
84 static unsigned int iio_ring_poll(struct file
*filp
,
85 struct poll_table_struct
*wait
)
87 struct iio_ring_buffer
*rb
= filp
->private_data
;
89 poll_wait(filp
, &rb
->pollq
, wait
);
91 return POLLIN
| POLLRDNORM
;
92 /* need a way of knowing if there may be enough data... */
96 static const struct file_operations iio_ring_fileops
= {
97 .read
= iio_ring_read_first_n_outer
,
98 .release
= iio_ring_release
,
99 .open
= iio_ring_open
,
100 .poll
= iio_ring_poll
,
101 .owner
= THIS_MODULE
,
102 .llseek
= noop_llseek
,
105 void iio_ring_access_release(struct device
*dev
)
107 struct iio_ring_buffer
*buf
108 = container_of(dev
, struct iio_ring_buffer
, dev
);
109 cdev_del(&buf
->access_handler
.chrdev
);
110 iio_device_free_chrdev_minor(MINOR(dev
->devt
));
112 EXPORT_SYMBOL(iio_ring_access_release
);
115 __iio_request_ring_buffer_chrdev(struct iio_ring_buffer
*buf
,
116 struct module
*owner
,
121 buf
->access_handler
.flags
= 0;
122 buf
->dev
.bus
= &iio_bus_type
;
123 device_initialize(&buf
->dev
);
125 ret
= iio_device_get_chrdev_minor();
127 goto error_device_put
;
129 buf
->dev
.devt
= MKDEV(MAJOR(iio_devt
), ret
);
130 dev_set_name(&buf
->dev
, "%s:buffer%d",
131 dev_name(buf
->dev
.parent
),
133 ret
= device_add(&buf
->dev
);
135 printk(KERN_ERR
"failed to add the ring dev\n");
136 goto error_device_put
;
138 cdev_init(&buf
->access_handler
.chrdev
, &iio_ring_fileops
);
139 buf
->access_handler
.chrdev
.owner
= owner
;
140 ret
= cdev_add(&buf
->access_handler
.chrdev
, buf
->dev
.devt
, 1);
142 printk(KERN_ERR
"failed to allocate ring chrdev\n");
143 goto error_device_unregister
;
147 error_device_unregister
:
148 device_unregister(&buf
->dev
);
150 put_device(&buf
->dev
);
155 static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer
*buf
)
157 device_unregister(&buf
->dev
);
160 void iio_ring_buffer_init(struct iio_ring_buffer
*ring
,
161 struct iio_dev
*dev_info
)
163 ring
->indio_dev
= dev_info
;
164 ring
->access_handler
.private = ring
;
165 init_waitqueue_head(&ring
->pollq
);
167 EXPORT_SYMBOL(iio_ring_buffer_init
);
169 static ssize_t
iio_show_scan_index(struct device
*dev
,
170 struct device_attribute
*attr
,
173 return sprintf(buf
, "%u\n", to_iio_dev_attr(attr
)->c
->scan_index
);
176 static ssize_t
iio_show_fixed_type(struct device
*dev
,
177 struct device_attribute
*attr
,
180 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
181 return sprintf(buf
, "%c%d/%d>>%u\n",
182 this_attr
->c
->scan_type
.sign
,
183 this_attr
->c
->scan_type
.realbits
,
184 this_attr
->c
->scan_type
.storagebits
,
185 this_attr
->c
->scan_type
.shift
);
188 static ssize_t
iio_scan_el_show(struct device
*dev
,
189 struct device_attribute
*attr
,
193 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
195 ret
= iio_scan_mask_query(ring
, to_iio_dev_attr(attr
)->address
);
198 return sprintf(buf
, "%d\n", ret
);
201 static int iio_scan_mask_clear(struct iio_ring_buffer
*ring
, int bit
)
203 if (bit
> IIO_MAX_SCAN_LENGTH
)
205 ring
->scan_mask
&= ~(1 << bit
);
210 static ssize_t
iio_scan_el_store(struct device
*dev
,
211 struct device_attribute
*attr
,
217 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
218 struct iio_dev
*indio_dev
= ring
->indio_dev
;
219 struct iio_dev_attr
*this_attr
= to_iio_dev_attr(attr
);
221 state
= !(buf
[0] == '0');
222 mutex_lock(&indio_dev
->mlock
);
223 if (indio_dev
->currentmode
== INDIO_RING_TRIGGERED
) {
227 ret
= iio_scan_mask_query(ring
, this_attr
->address
);
231 ret
= iio_scan_mask_clear(ring
, this_attr
->address
);
234 } else if (state
&& !ret
) {
235 ret
= iio_scan_mask_set(ring
, this_attr
->address
);
241 mutex_unlock(&indio_dev
->mlock
);
243 return ret
? ret
: len
;
247 static ssize_t
iio_scan_el_ts_show(struct device
*dev
,
248 struct device_attribute
*attr
,
251 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
252 return sprintf(buf
, "%d\n", ring
->scan_timestamp
);
255 static ssize_t
iio_scan_el_ts_store(struct device
*dev
,
256 struct device_attribute
*attr
,
261 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
262 struct iio_dev
*indio_dev
= ring
->indio_dev
;
264 state
= !(buf
[0] == '0');
265 mutex_lock(&indio_dev
->mlock
);
266 if (indio_dev
->currentmode
== INDIO_RING_TRIGGERED
) {
270 ring
->scan_timestamp
= state
;
272 mutex_unlock(&indio_dev
->mlock
);
274 return ret
? ret
: len
;
277 static int iio_ring_add_channel_sysfs(struct iio_ring_buffer
*ring
,
278 const struct iio_chan_spec
*chan
)
282 ret
= __iio_add_chan_devattr("index", "scan_elements",
284 &iio_show_scan_index
,
289 &ring
->scan_el_dev_attr_list
);
293 ret
= __iio_add_chan_devattr("type", "scan_elements",
295 &iio_show_fixed_type
,
300 &ring
->scan_el_dev_attr_list
);
304 if (chan
->type
!= IIO_TIMESTAMP
)
305 ret
= __iio_add_chan_devattr("en", "scan_elements",
312 &ring
->scan_el_dev_attr_list
);
314 ret
= __iio_add_chan_devattr("en", "scan_elements",
316 &iio_scan_el_ts_show
,
317 &iio_scan_el_ts_store
,
321 &ring
->scan_el_dev_attr_list
);
326 static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer
*ring
,
327 struct iio_dev_attr
*p
)
329 sysfs_remove_file_from_group(&ring
->dev
.kobj
,
330 &p
->dev_attr
.attr
, "scan_elements");
331 kfree(p
->dev_attr
.attr
.name
);
335 static struct attribute
*iio_scan_el_dummy_attrs
[] = {
339 static struct attribute_group iio_scan_el_dummy_group
= {
340 .name
= "scan_elements",
341 .attrs
= iio_scan_el_dummy_attrs
344 static void __iio_ring_attr_cleanup(struct iio_ring_buffer
*ring
)
346 struct iio_dev_attr
*p
, *n
;
347 int anydynamic
= !list_empty(&ring
->scan_el_dev_attr_list
);
348 list_for_each_entry_safe(p
, n
,
349 &ring
->scan_el_dev_attr_list
, l
)
350 iio_ring_remove_and_free_scan_dev_attr(ring
, p
);
352 if (ring
->scan_el_attrs
)
353 sysfs_remove_group(&ring
->dev
.kobj
,
354 ring
->scan_el_attrs
);
356 sysfs_remove_group(&ring
->dev
.kobj
,
357 &iio_scan_el_dummy_group
);
360 int iio_ring_buffer_register_ex(struct iio_ring_buffer
*ring
, int id
,
361 const struct iio_chan_spec
*channels
,
366 ret
= __iio_request_ring_buffer_chrdev(ring
, ring
->owner
, id
);
370 if (ring
->scan_el_attrs
) {
371 ret
= sysfs_create_group(&ring
->dev
.kobj
,
372 ring
->scan_el_attrs
);
375 "Failed to add sysfs scan elements\n");
376 goto error_free_ring_buffer_chrdev
;
378 } else if (channels
) {
379 ret
= sysfs_create_group(&ring
->dev
.kobj
,
380 &iio_scan_el_dummy_group
);
382 goto error_free_ring_buffer_chrdev
;
385 INIT_LIST_HEAD(&ring
->scan_el_dev_attr_list
);
388 for (i
= 0; i
< num_channels
; i
++) {
389 ret
= iio_ring_add_channel_sysfs(ring
, &channels
[i
]);
391 goto error_cleanup_dynamic
;
396 error_cleanup_dynamic
:
397 __iio_ring_attr_cleanup(ring
);
398 error_free_ring_buffer_chrdev
:
399 __iio_free_ring_buffer_chrdev(ring
);
403 EXPORT_SYMBOL(iio_ring_buffer_register_ex
);
/**
 * iio_ring_buffer_unregister() - reverse iio_ring_buffer_register_ex()
 * @ring:	ring buffer to unregister
 */
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_ring_attr_cleanup(ring);
	__iio_free_ring_buffer_chrdev(ring);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
412 ssize_t
iio_read_ring_length(struct device
*dev
,
413 struct device_attribute
*attr
,
416 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
418 if (ring
->access
->get_length
)
419 return sprintf(buf
, "%d\n",
420 ring
->access
->get_length(ring
));
424 EXPORT_SYMBOL(iio_read_ring_length
);
426 ssize_t
iio_write_ring_length(struct device
*dev
,
427 struct device_attribute
*attr
,
433 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
435 ret
= strict_strtoul(buf
, 10, &val
);
439 if (ring
->access
->get_length
)
440 if (val
== ring
->access
->get_length(ring
))
443 if (ring
->access
->set_length
) {
444 ring
->access
->set_length(ring
, val
);
445 if (ring
->access
->mark_param_change
)
446 ring
->access
->mark_param_change(ring
);
451 EXPORT_SYMBOL(iio_write_ring_length
);
453 ssize_t
iio_read_ring_bytes_per_datum(struct device
*dev
,
454 struct device_attribute
*attr
,
457 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
459 if (ring
->access
->get_bytes_per_datum
)
460 return sprintf(buf
, "%d\n",
461 ring
->access
->get_bytes_per_datum(ring
));
465 EXPORT_SYMBOL(iio_read_ring_bytes_per_datum
);
467 ssize_t
iio_store_ring_enable(struct device
*dev
,
468 struct device_attribute
*attr
,
473 bool requested_state
, current_state
;
475 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
476 struct iio_dev
*dev_info
= ring
->indio_dev
;
478 mutex_lock(&dev_info
->mlock
);
479 previous_mode
= dev_info
->currentmode
;
480 requested_state
= !(buf
[0] == '0');
481 current_state
= !!(previous_mode
& INDIO_ALL_RING_MODES
);
482 if (current_state
== requested_state
) {
483 printk(KERN_INFO
"iio-ring, current state requested again\n");
486 if (requested_state
) {
487 if (ring
->setup_ops
->preenable
) {
488 ret
= ring
->setup_ops
->preenable(dev_info
);
491 "Buffer not started:"
492 "ring preenable failed\n");
496 if (ring
->access
->request_update
) {
497 ret
= ring
->access
->request_update(ring
);
500 "Buffer not started:"
501 "ring parameter update failed\n");
505 if (ring
->access
->mark_in_use
)
506 ring
->access
->mark_in_use(ring
);
507 /* Definitely possible for devices to support both of these.*/
508 if (dev_info
->modes
& INDIO_RING_TRIGGERED
) {
509 if (!dev_info
->trig
) {
511 "Buffer not started: no trigger\n");
513 if (ring
->access
->unmark_in_use
)
514 ring
->access
->unmark_in_use(ring
);
517 dev_info
->currentmode
= INDIO_RING_TRIGGERED
;
518 } else if (dev_info
->modes
& INDIO_RING_HARDWARE_BUFFER
)
519 dev_info
->currentmode
= INDIO_RING_HARDWARE_BUFFER
;
520 else { /* should never be reached */
525 if (ring
->setup_ops
->postenable
) {
526 ret
= ring
->setup_ops
->postenable(dev_info
);
529 "Buffer not started:"
530 "postenable failed\n");
531 if (ring
->access
->unmark_in_use
)
532 ring
->access
->unmark_in_use(ring
);
533 dev_info
->currentmode
= previous_mode
;
534 if (ring
->setup_ops
->postdisable
)
535 ring
->setup_ops
->postdisable(dev_info
);
540 if (ring
->setup_ops
->predisable
) {
541 ret
= ring
->setup_ops
->predisable(dev_info
);
545 if (ring
->access
->unmark_in_use
)
546 ring
->access
->unmark_in_use(ring
);
547 dev_info
->currentmode
= INDIO_DIRECT_MODE
;
548 if (ring
->setup_ops
->postdisable
) {
549 ret
= ring
->setup_ops
->postdisable(dev_info
);
555 mutex_unlock(&dev_info
->mlock
);
559 mutex_unlock(&dev_info
->mlock
);
562 EXPORT_SYMBOL(iio_store_ring_enable
);
564 ssize_t
iio_show_ring_enable(struct device
*dev
,
565 struct device_attribute
*attr
,
568 struct iio_ring_buffer
*ring
= dev_get_drvdata(dev
);
569 return sprintf(buf
, "%d\n", !!(ring
->indio_dev
->currentmode
570 & INDIO_ALL_RING_MODES
));
572 EXPORT_SYMBOL(iio_show_ring_enable
);
574 int iio_sw_ring_preenable(struct iio_dev
*indio_dev
)
576 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
578 dev_dbg(&indio_dev
->dev
, "%s\n", __func__
);
579 /* Check if there are any scan elements enabled, if not fail*/
580 if (!(ring
->scan_count
|| ring
->scan_timestamp
))
582 if (ring
->scan_timestamp
)
583 if (ring
->scan_count
)
584 /* Timestamp (aligned to s64) and data */
585 size
= (((ring
->scan_count
* ring
->bpe
)
587 & ~(sizeof(s64
) - 1))
589 else /* Timestamp only */
592 size
= ring
->scan_count
* ring
->bpe
;
593 ring
->access
->set_bytes_per_datum(ring
, size
);
597 EXPORT_SYMBOL(iio_sw_ring_preenable
);