/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
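/* Semantics sketch for the helper above: if a shared event is already
 * queued (shared_ev_pointer.ev_p is non-NULL) the pending event is
 * escalated in place via __iio_change_event() rather than queueing a
 * second one; otherwise a fresh event is pushed.
 */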
/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
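/* Illustrative sketch (hypothetical type, not part of this file): the
 * "first element" convention works because an implementation embeds the
 * core structure at offset zero, e.g.
 *
 *	struct my_sw_ring {
 *		struct iio_ring_buffer buf;	// must stay first
 *		unsigned char *data;
 *	};
 *
 * so the iio_ring_buffer pointer stored in hand->private can be cast
 * straight back to the implementation type (container_of with offset 0).
 */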
/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}
/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
				  size_t count, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;

	/* rip lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
	if (copied <= 0) {
		ret = copied;
		goto error_ret;
	}
	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	kfree(data);

	return copied;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}
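/* Usage sketch (illustrative; the device node name is hypothetical):
 * userspace consumes scan data by read()ing the access chrdev,
 *
 *	int fd = open("/dev/...:access0", O_RDONLY);
 *	ssize_t n = read(fd, rx, sizeof(rx));	// serviced by iio_ring_rip_outer()
 *
 * rip_lots() hands back a kernel-allocated buffer plus a dead_offset of
 * invalidated leading bytes, which the copy_to_user() above skips.
 */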
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:	ring buffer whose event chrdev we are allocating
 * @id:		id of this ring buffer (typically 0)
 * @owner:	the module who owns the ring buffer (for ref counting)
 * @dev:	device with which the chrdev is associated
 **/
static int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id,
				       struct module *owner,
				       struct device *dev)
{
	int ret;

	snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
		 "%s:event%d",
		 dev_name(&buf->dev),
		 id);
	ret = iio_setup_ev_int(&(buf->ev_int),
			       buf->ev_int._name,
			       owner,
			       dev);
	if (ret)
		goto error_ret;

	return 0;

error_ret:
	return ret;
}
static void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
}
static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};
static int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.bus = &iio_bus_type;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	dev_set_name(&buf->access_dev, "%s:access%d",
		     dev_name(&buf->dev),
		     id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_device_put;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}

	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_device_put:
	put_device(&buf->access_dev);

	return ret;
}
static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->access_dev);
}
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
	ring->shared_ev_pointer.ev_p = NULL;
	spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
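/* Typical driver-side call order (sketch; my_ring_alloc() is a
 * hypothetical allocator, not part of this file):
 *
 *	struct iio_ring_buffer *ring = my_ring_alloc(indio_dev);
 *	iio_ring_buffer_init(ring, indio_dev);		// wire back-pointers
 *	ret = iio_ring_buffer_register(ring, 0);	// add device + chrdevs
 *	...
 *	iio_ring_buffer_unregister(ring);		// on driver remove
 */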
int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	int ret;

	dev_set_name(&ring->dev, "%s:buffer%d",
		     dev_name(ring->dev.parent),
		     id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_ret;

	ret = __iio_request_ring_buffer_event_chrdev(ring,
						     0,
						     ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring,
						      0,
						      ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_event_chrdev;
		}
	}

	return ret;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);
	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);
ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	unsigned long val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bytes_per_datum)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bytes_per_datum(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
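/* Userspace view (sketch): the two functions above typically back a sysfs
 * attribute on the ring device; the attribute name and path below are
 * assumptions, not defined in this file.
 *
 *	echo 1 > /sys/.../device0:buffer0/ring_enable	# start capture
 *	cat /sys/.../device0:buffer0/ring_enable	# -> 1
 *
 * Re-requesting the current state is a no-op (logged at KERN_INFO).
 */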
ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);
ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_el->number);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_el->number);
		if (ret)
			goto error_ret;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
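/* Sketch: scan elements choose which channels enter the ring. With the
 * ring idle (not INDIO_RING_TRIGGERED) a write toggles the channel in the
 * scan mask; the attribute name below is a hypothetical example.
 *
 *	echo 1 > .../scan_elements/in0_en
 *
 * While triggered capture is running the store above bails out with
 * -EBUSY so the mask cannot change under in-flight data.
 */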
ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);
ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);