/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
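/*
 * Illustrative sketch (not part of this file): a buffer implementation
 * satisfies the assumption above by embedding struct iio_buffer as its
 * first member, so the core can hand any implementation to the chrdev
 * code uniformly:
 *
 *	struct my_buffer {			// hypothetical name
 *		struct iio_buffer buffer;	// must come first
 *		...				// implementation state
 *	};
 */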
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
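/*
 * The string produced above encodes the scan element layout. For example
 * (illustrative values only) "le:s12/16>>4" describes a little-endian,
 * signed, 12-bit value stored in 16 bits and shifted right by 4.
 */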
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask =
				kcalloc(BITS_TO_LONGS(indio_dev->masklength),
					sizeof(*buffer->scan_mask),
					GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(buffer->scan_el_group.attrs[0]),
			GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs,
		       buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
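/*
 * Typical use, sketched under assumed driver names (my_channels is
 * hypothetical): a driver registers its scan elements from probe()
 * after allocating and attaching the buffer:
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */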
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	unsigned long val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
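/*
 * From userspace the attribute above appears as
 * /sys/bus/iio/devices/iio:deviceX/buffer/length; e.g.
 * "echo 128 > length" resizes the buffer to 128 scans while it is
 * disabled (a write while capture is enabled fails with -EBUSY).
 */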
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
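/*
 * Userspace drives this through the enable attribute, e.g.
 * "echo 1 > /sys/bus/iio/devices/iio:deviceX/buffer/enable" to start
 * capture and "echo 0" to stop; the setup_ops callbacks above give the
 * driver a hook around each side of the transition.
 */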
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
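/*
 * Note the while (*av_masks) test above: a driver's available_scan_masks
 * array must be terminated by an empty (all-zero) mask, otherwise this
 * walk runs off the end of the array.
 */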
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
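/*
 * Worked example (illustrative): two enabled channels with 16-bit
 * storagebits plus a 64-bit timestamp give
 *	bytes = 2 + 2			(two naturally aligned u16 elements)
 *	bytes = ALIGN(4, 8) + 8 = 16	(timestamp aligned to its own size)
 * i.e. 16 bytes per scan, with bytes 4..7 left as alignment padding.
 */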
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);
	buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

	/* What scan mask do we actually have? */
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;

	if (indio_dev->active_scan_mask == NULL)
		return -EINVAL;

	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
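/*
 * A driver whose hardware can sample only one channel at a time can plug
 * this validator straight into its setup ops (sketch; my_setup_ops is a
 * hypothetical name):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		...
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */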
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);
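/*
 * Typically called from a driver's pollfunc/trigger handler once a full
 * scan has been assembled (sketch; the data layout must match the
 * active scan mask and pf is the handler's struct iio_poll_func):
 *
 *	iio_push_to_buffer(indio_dev->buffer, data, pf->timestamp);
 */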
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
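/*
 * Worked example (illustrative): if the user enables channels 0 and 2
 * (all 16-bit, no timestamp) but available_scan_masks forces channels
 * 0-2 to be captured, the table built above holds two ops:
 *	{ .from = 0, .to = 0, .length = 2 }	channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	channel 2 (skipping channel 1)
 * and demux_bounce is sized to the 4 bytes of demuxed output.
 */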