/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
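
/* Sketch of intended use (the watermark event codes are assumed for
 * illustration): a ring implementation pushes an event when the buffer
 * passes 50% full; if userspace has not consumed that event by the 75%
 * mark, the still-pending event is escalated in place rather than a
 * second one being queued:
 *
 *	iio_push_ring_event(ring, IIO_EVENT_CODE_RING_50_FULL, timestamp);
 *	...
 *	iio_push_or_escallate_ring_event(ring,
 *					 IIO_EVENT_CODE_RING_75_FULL,
 *					 timestamp);
 */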

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
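
/* Illustrative sketch, not used by the core: the hand->private cast in
 * iio_ring_open() above is only safe because ring implementations embed
 * struct iio_ring_buffer as their *first* element, so a pointer to the
 * buffer doubles as a pointer to the enclosing structure. The type below
 * is hypothetical and named for illustration only.
 */
struct iio_example_sw_ring_buffer {
	struct iio_ring_buffer buf;	/* must stay first */
	unsigned char *data;		/* implementation-private storage */
};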

/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
				  size_t count, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;

	/* rip lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
	if (copied < 0) {
		ret = copied;
		goto error_ret;
	}
	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	kfree(data);

	return copied;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}

static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
};
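
/* Userspace view (sketch; the device node path is an assumption for
 * illustration): the access chrdev registered below appears as an IIO
 * character device, and each read() hands back freshly "ripped" scans,
 * already offset past any bytes invalidated during the copy:
 *
 *	int fd = open("/dev/device0:buffer0:access0", O_RDONLY);
 *	ssize_t n = read(fd, scan_data, sizeof(scan_data));
 */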

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:	ring buffer whose event chrdev we are allocating
 * @id:		id of this buffer, used in the chrdev name
 * @owner:	the module who owns the ring buffer (for ref counting)
 * @dev:	device with which the chrdev is associated
 **/
static int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id,
				       struct module *owner,
				       struct device *dev)
{
	snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
		 "%s:event%d",
		 dev_name(&buf->dev),
		 id);
	return iio_setup_ev_int(&(buf->ev_int),
				buf->ev_int._name,
				owner,
				dev);
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);

	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};

static int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.bus = &iio_bus_type;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	dev_set_name(&buf->access_dev, "%s:access%d",
		     dev_name(&buf->dev),
		     id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_device_put;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_device_put:
	put_device(&buf->access_dev);

	return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
	ring->shared_ev_pointer.ev_p = NULL;
	spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	int ret;

	ring->id = id;

	dev_set_name(&ring->dev, "%s:buffer%d",
		     dev_name(ring->dev.parent),
		     ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_ret;

	ret = __iio_request_ring_buffer_event_chrdev(ring,
						     0,
						     ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring,
						      0,
						      ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	return 0;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
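
/* Typical driver-side bring-up (sketch; the allocation helper is
 * implementation-specific and its name is assumed for illustration,
 * and error handling is elided): the implementation allocates its
 * ring, ties it to the iio_dev, then registers it so the event and
 * access chrdevs above come into existence:
 *
 *	ring = my_sw_ring_allocate(indio_dev);
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring, 0);
 *	...
 *	iio_ring_buffer_unregister(ring);	(on teardown)
 */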

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	unsigned long val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	/* Nothing to do if the length is unchanged. */
	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
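
/* Sketch of how a ring implementation exposes these helpers through
 * sysfs (the attribute name is assumed for illustration; the helpers
 * are written as standard device_attribute show/store methods):
 *
 *	static DEVICE_ATTR(length, S_IRUGO | S_IWUSR,
 *			   iio_read_ring_length,
 *			   iio_write_ring_length);
 */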

ssize_t iio_read_ring_bps(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bpd)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bpd(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);

ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret, previous_mode;
	bool requested_state, current_state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
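
/* The enable sequencing above, for reference: preenable ->
 * request_update -> mark_in_use -> mode selection -> postenable, with
 * each failure unwinding the steps already taken. From userspace this
 * is driven through a sysfs attribute bound to the store/show pair
 * (attribute name and path assumed for illustration):
 *
 *	echo 1 > /sys/bus/iio/devices/device0:buffer0/ring_enable
 *	echo 0 > /sys/bus/iio/devices/device0:buffer0/ring_enable
 */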

ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	/* The scan mask cannot change while the ring is capturing. */
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count--;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count++;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
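
/* Sketch (field values and callback name assumed for illustration): a
 * driver describes each scan element with its bit in the scan mask and
 * an optional hardware hook; iio_scan_el_store() above flips the mask
 * bit and then calls set_state so the device can gate the channel:
 *
 *	static struct iio_scan_el accel_x_el = {
 *		.number = 0,
 *		.set_state = mydrv_scan_el_set_state,
 *	};
 */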

ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	/* Timestamping cannot be toggled while the ring is capturing. */
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);