/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer_generic.h"
static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}
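/*
 * Illustrative sketch (not part of the core, names hypothetical): the
 * contract noted above means a buffer implementation is expected to
 * look something like the following ring buffer, with the iio_buffer
 * placed first so the core's struct iio_buffer pointer and the
 * implementation's own structure share an address:
 *
 *	struct my_ring_buffer {
 *		struct iio_buffer buffer;	<- must be the first member
 *		u8 *data;
 *		size_t read_pos;
 *	};
 */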
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}
int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb)
                return -EINVAL;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);
        return 0;
}
void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);
}
void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
{
        buffer->indio_dev = dev_info;
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
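/*
 * Minimal usage sketch, assuming the hypothetical my_ring_buffer above:
 *
 *	struct my_ring_buffer *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *	if (ring)
 *		iio_buffer_init(&ring->buffer, indio_dev);
 *
 * This wires the buffer back to its device and initialises the wait
 * queue that iio_buffer_poll() sleeps on.
 */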
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
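/*
 * For illustration, a little-endian, signed 12-bit sample stored in
 * 16 bits and shifted right by 4 reads back through sysfs as
 *
 *	le:s12/16>>4
 *
 * i.e. endianness prefix, sign character, realbits/storagebits and
 * right shift, matching the format string above.
 */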
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(dev_info->buffer,
                                  to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        buffer->scan_count--;
        return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kzalloc(sizeof(*buffer->scan_mask)*
                                          BITS_TO_LONGS(indio_dev->masklength),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
                          (attrcount + 1),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
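/*
 * Illustrative driver-side call (channel table and names hypothetical):
 * a driver describes its scan elements in an iio_chan_spec array and
 * registers it once its buffer is allocated:
 *
 *	static const struct iio_chan_spec my_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.scan_index = 0,
 *			.scan_type = { .sign = 's', .realbits = 12,
 *				       .storagebits = 16, .shift = 4 },
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *
 * This creates the scan_elements sysfs group with the per-channel
 * index, type and en attributes built by iio_buffer_add_channel_sysfs().
 */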
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        ulong val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        if (buffer->access->set_length) {
                buffer->access->set_length(buffer, val);
                if (buffer->access->mark_param_change)
                        buffer->access->mark_param_change(buffer);
        }

        return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               buffer->access->get_bytes_per_datum(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        struct iio_buffer *buffer = dev_info->buffer;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (buffer->setup_ops->preenable) {
                        ret = buffer->setup_ops->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->mark_in_use)
                        buffer->access->mark_in_use(buffer);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (dev_info->modes & INDIO_BUFFER_HARDWARE) {
                        dev_info->currentmode = INDIO_BUFFER_HARDWARE;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (buffer->setup_ops->postenable) {
                        ret = buffer->setup_ops->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                dev_info->currentmode = previous_mode;
                                if (buffer->setup_ops->postdisable)
                                        buffer->setup_ops->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (buffer->setup_ops->predisable) {
                        ret = buffer->setup_ops->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (buffer->access->unmark_in_use)
                        buffer->access->unmark_in_use(buffer);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (buffer->setup_ops->postdisable) {
                        ret = buffer->setup_ops->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
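/*
 * To summarise the sequencing implemented above: on enable the order is
 * setup_ops->preenable(), access->request_update(), the mode switch,
 * then setup_ops->postenable(); on disable it is setup_ops->predisable(),
 * the switch back to INDIO_DIRECT_MODE, then setup_ops->postdisable().
 * Failures unwind the in-use marking and restore the previous mode.
 */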
ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *dev_info = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(dev_info->currentmode
                                       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        size_t size;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Check if there are any scan elements enabled, if not fail */
        if (!(buffer->scan_count || buffer->scan_timestamp))
                return -EINVAL;
        if (buffer->scan_timestamp) {
                if (buffer->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((buffer->scan_count * buffer->bpe)
                                 + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else    /* Timestamp only */
                        size = sizeof(s64);
        } else {
                /* Data only */
                size = buffer->scan_count * buffer->bpe;
        }
        buffer->access->set_bytes_per_datum(buffer, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
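/*
 * Worked example of the sizing above: with scan_count = 3 and bpe = 2
 * there are 6 data bytes; rounding up to the 8-byte s64 alignment gives
 * 8, and appending the timestamp makes the total bytes-per-datum 16.
 */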
/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(dev_info->masklength),
                            GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!dev_info->masklength) {
                WARN(1, "trying to set scanmask prior to registering buffer\n");
                kfree(trialmask);
                return -EINVAL;
        }
        bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
        set_bit(bit, trialmask);

        if (dev_info->available_scan_masks) {
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           trialmask);
                if (!mask) {
                        kfree(trialmask);
                        return -EINVAL;
                }
        }
        bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
        buffer->scan_count++;

        kfree(trialmask);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *dev_info = buffer->indio_dev;
        unsigned long *mask;

        if (bit > dev_info->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;
        if (dev_info->available_scan_masks)
                mask = iio_scan_mask_match(dev_info->available_scan_masks,
                                           dev_info->masklength,
                                           buffer->scan_mask);
        else
                mask = buffer->scan_mask;
        if (!mask)
                return 0;

        return test_bit(bit, mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);