/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/idr.h>

#include "iio.h"
#include "ring_generic.h"

/* IDR for ring buffer identifier */
static DEFINE_IDR(iio_ring_idr);
/* IDR for ring event identifier */
static DEFINE_IDR(iio_ring_event_idr);
/* IDR for ring access identifier */
static DEFINE_IDR(iio_ring_access_idr);

int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
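
/*
 * Illustrative sketch only, not part of the original driver: a ring
 * implementation is expected to embed struct iio_ring_buffer as its
 * first member, so that the private pointer stored by iio_ring_open()
 * can safely be treated as a struct iio_ring_buffer *.  The type and
 * field names below are hypothetical.
 */
struct example_sw_ring_sketch {
	struct iio_ring_buffer buf;	/* must be the first member */
	unsigned char *data;		/* backing storage for samples */
	int read_p;			/* hypothetical read index */
	int write_p;			/* hypothetical write index */
};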

/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
ssize_t iio_ring_rip_outer(struct file *filp,
			   char __user *buf,
			   size_t count,
			   loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;

	/* rip_lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;

	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
	if (copied < 0)
		return copied;

	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	kfree(data);

	return copied;

error_free_data_cpy:
	kfree(data);
	return ret;
}

static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
};

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf: ring buffer whose event chrdev we are allocating
 * @owner: the module who owns the ring buffer (for ref counting)
 * @dev: device with which the chrdev is associated
 **/
static int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
					struct module *owner,
					struct device *dev)
{
	int ret;

	ret = iio_get_new_idr_val(&iio_ring_event_idr);
	if (ret < 0)
		goto error_ret;
	buf->ev_int.id = ret;

	snprintf(buf->ev_int._name, 20,
		 "ring_event%d", buf->ev_int.id);
	ret = iio_setup_ev_int(&(buf->ev_int),
			       buf->ev_int._name,
			       owner, dev);
	if (ret)
		goto error_free_idr_val;
	return 0;

error_free_idr_val:
	iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
error_ret:
	return ret;
}

static void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
	iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
}

static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};

static int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.class = &iio_class;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	ret = iio_get_new_idr_val(&iio_ring_access_idr);
	if (ret < 0)
		goto error_device_put;
	buf->access_id = ret;

	dev_set_name(&buf->access_dev, "ring_access%d", buf->access_id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_free_idr_val;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_free_idr_val:
	iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
error_device_put:
	put_device(&buf->access_dev);

	return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
	device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
}
EXPORT_SYMBOL(iio_ring_buffer_init);

int iio_ring_buffer_register(struct iio_ring_buffer *ring)
{
	int ret;

	ret = iio_get_new_idr_val(&iio_ring_idr);
	if (ret < 0)
		goto error_ret;
	ring->id = ret;

	dev_set_name(&ring->dev, "ring_buffer%d", ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_free_idr_val;

	ret = __iio_request_ring_buffer_event_chrdev(ring,
						     ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring,
						      ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;
	return 0;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_free_idr_val:
	iio_free_idr_val(&iio_ring_idr, ring->id);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
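
/*
 * Usage sketch (hypothetical, not taken from this file): a device
 * driver that has allocated a ring implementation and owns the
 * corresponding struct iio_dev would typically tie the two together
 * and expose the ring roughly like this.  The function name is made
 * up for illustration.
 */
static inline int example_ring_setup_sketch(struct iio_dev *indio_dev,
					    struct iio_ring_buffer *ring)
{
	/* Point the ring at its parent device and set up the handlers. */
	iio_ring_buffer_init(ring, indio_dev);
	/* Create the ring_buffer%d device plus its event/access chrdevs. */
	return iio_ring_buffer_register(ring);
}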

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
	iio_free_idr_val(&iio_ring_idr, ring->id);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	unsigned long val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
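
/*
 * Wiring sketch only: the length show/store helpers above are intended
 * to back a sysfs attribute on a device whose drvdata is the ring
 * buffer (see the dev_get_drvdata() calls).  The attribute name below
 * is hypothetical and not taken from this file.
 */
static DEVICE_ATTR(length_example, 0644,
		   iio_read_ring_length, iio_write_ring_length);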

ssize_t iio_read_ring_bps(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bpd)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bpd(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);
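
/*
 * Enable/disable sequencing for the store handler below: enabling runs
 * preenable -> request_update -> mark_in_use -> switch currentmode to
 * INDIO_RING_TRIGGERED or INDIO_RING_HARDWARE_BUFFER -> postenable,
 * unwinding on any failure; disabling runs predisable -> unmark_in_use
 * -> INDIO_DIRECT_MODE -> postdisable.  Everything happens under
 * dev_info->mlock.
 */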
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret, previous_mode;
	bool requested_state, current_state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these.*/
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_ERR
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count--;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count++;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);