On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] drivers/staging/iio/industrialio-ring.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/idr.h>

#include "iio.h"
#include "ring_generic.h"
/* IDR for ring buffer identifier */
static DEFINE_IDR(iio_ring_idr);
/* IDR for ring event identifier */
static DEFINE_IDR(iio_ring_event_idr);
/* IDR for ring access identifier */
static DEFINE_IDR(iio_ring_access_idr);
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
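/*
 * Sketch (hypothetical, for illustration only): the "first element" rule
 * above means a concrete ring implementation embeds the generic header at
 * offset zero, so the handlers here can treat hand->private as a
 * struct iio_ring_buffer pointer regardless of the real type:
 *
 *	struct hypothetical_sw_ring {
 *		struct iio_ring_buffer buf;	// must be the first member
 *		u8 *data;			// backing storage
 *		int read_pos, write_pos;	// ring cursors
 *	};
 *
 * With this layout &ring->buf == (struct iio_ring_buffer *)ring, which is
 * exactly what the cast through the private pointer relies on.
 */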
/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}
/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
ssize_t iio_ring_rip_outer(struct file *filp,
			   char *buf,
			   size_t count,
			   loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;
	/* rip lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

	if (copied < 0) {
		ret = copied;
		goto error_ret;
	}
	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	kfree(data);

	return copied;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}
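/*
 * Usage sketch (hypothetical userspace code; the device node name and
 * helper are assumptions): data leaves the ring through an ordinary
 * read() on the ring access chrdev, which lands in iio_ring_rip_outer().
 *
 *	int fd = open("/dev/ring_access0", O_RDONLY);
 *	char rx[4096];
 *	ssize_t n = read(fd, rx, sizeof(rx));	// calls rip_lots internally
 *	if (n > 0)
 *		consume_scans(rx, n);		// hypothetical consumer
 *	close(fd);
 */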
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
};
/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf: ring buffer whose event chrdev we are allocating
 * @id: id of this ring buffer (currently unused here)
 * @owner: the module who owns the ring buffer (for ref counting)
 * @dev: device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id,
				       struct module *owner,
				       struct device *dev)
{
	int ret;
	ret = iio_get_new_idr_val(&iio_ring_event_idr);
	if (ret < 0)
		goto error_ret;
	else
		buf->ev_int.id = ret;

	snprintf(buf->ev_int._name, 20,
		 "ring_event_line%d",
		 buf->ev_int.id);
	ret = iio_setup_ev_int(&(buf->ev_int),
			       buf->ev_int._name,
			       owner,
			       dev);
	if (ret)
		goto error_free_id;
	return 0;

error_free_id:
	iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
error_ret:
	return ret;
}
static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
	iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
}
static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};
static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.class = &iio_class;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	ret = iio_get_new_idr_val(&iio_ring_access_idr);
	if (ret < 0)
		goto error_device_put;
	else
		buf->access_id = ret;
	dev_set_name(&buf->access_dev, "ring_access%d", buf->access_id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_free_idr;
	}
	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_free_idr:
	iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
error_device_put:
	put_device(&buf->access_dev);

	return ret;
}
static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
	device_unregister(&buf->access_dev);
}
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
}
EXPORT_SYMBOL(iio_ring_buffer_init);
int iio_ring_buffer_register(struct iio_ring_buffer *ring)
{
	int ret;
	ret = iio_get_new_idr_val(&iio_ring_idr);
	if (ret < 0)
		goto error_ret;
	else
		ring->id = ret;

	dev_set_name(&ring->dev, "ring_buffer%d", ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_free_id;

	ret = __iio_request_ring_buffer_event_chrdev(ring,
						     0,
						     ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring,
						      0,
						      ring->owner);

	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	return ret;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_free_id:
	iio_free_idr_val(&iio_ring_idr, ring->id);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
	iio_free_idr_val(&iio_ring_idr, ring->id);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
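/*
 * Usage sketch (hypothetical driver code; the error label is invented for
 * the example): a typical driver wires the ring up with
 * iio_ring_buffer_init() and then registers it.
 *
 *	ring->owner = THIS_MODULE;
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring);
 *	if (ret)
 *		goto error_free_ring;
 *	...
 *	iio_ring_buffer_unregister(ring);	// mirror call on removal
 */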
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);
ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
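/*
 * Usage sketch (hypothetical userspace code; the sysfs path is an
 * assumption): the length attribute is a plain sysfs file, so resizing
 * the ring is just a decimal write, handled by iio_write_ring_length().
 *
 *	int fd = open("/sys/class/iio/.../ring_buffer0/length", O_WRONLY);
 *	write(fd, "128\n", 4);	// request a length of 128
 *	close(fd);
 */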
ssize_t iio_read_ring_bps(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bpd)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bpd(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bps);
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
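/*
 * Ordering enforced above: enabling runs
 * preenable -> request_update -> mark_in_use -> mode selection ->
 * postenable, while disabling runs
 * predisable -> unmark_in_use -> mode reset -> postdisable,
 * all under dev_info->mlock.
 */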
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);
ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count--;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, this_el->number);
		if (ret)
			goto error_ret;
		indio_dev->scan_count++;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
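/*
 * Usage sketch (hypothetical userspace code; the attribute path and name
 * are assumptions): scan elements are switched on and off through their
 * sysfs files while the ring is disabled, landing in iio_scan_el_store().
 *
 *	int fd = open("/sys/.../scan_elements/scan_en_accel_x", O_WRONLY);
 *	write(fd, "1", 1);	// -EBUSY while in triggered ring mode
 *	close(fd);
 */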
ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", indio_dev->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);
ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;
	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);