/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
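
/*
 * Illustrative sketch only, not used by the core: the iio_buffer pointer
 * stored in indio_dev->buffer above can be mapped back to its implementation
 * with container_of() precisely because every buffer implementation is
 * expected to embed struct iio_buffer as its *first* member. The identifiers
 * sketch_buffer and sketch_read_first_n are hypothetical, not part of any
 * driver.
 */
struct sketch_buffer {
	struct iio_buffer buffer;	/* must remain the first member */
	u8 *data;			/* implementation-private storage */
};

static ssize_t __maybe_unused sketch_read_first_n(struct iio_buffer *r,
						  size_t n, char __user *buf)
{
	struct sketch_buffer *sb = container_of(r, struct sketch_buffer, buffer);

	/* ...copy at most n bytes of sb->data out to userspace here... */
	return 0;
}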
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
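
/*
 * Worked example (descriptive only): a channel described as a signed 12-bit
 * value stored in 16 bits, shifted right by 4 and little-endian on the wire
 * reads back from its scan_elements *_type file as "le:s12/16>>4".
 */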
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     &iio_show_scan_index,
				     &buffer->scan_el_dev_attr_list);
	ret = __iio_add_chan_devattr("type",
				     &iio_show_fixed_type,
				     &buffer->scan_el_dev_attr_list);
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     &buffer->scan_el_dev_attr_list);
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	for (i = 0; i < num_channels; i++) {
		if (channels[i].scan_index < 0)
			continue;

		/* Establish necessary mask length */
		if (channels[i].scan_index >
		    (int)indio_dev->masklength - 1)
			indio_dev->masklength
				= channels[i].scan_index + 1;

		ret = iio_buffer_add_channel_sysfs(indio_dev,
						   &channels[i]);
		if (ret < 0)
			goto error_cleanup_dynamic;
		if (channels[i].type == IIO_TIMESTAMP)
			indio_dev->scan_index_timestamp =
				channels[i].scan_index;
	}
	if (indio_dev->masklength && buffer->scan_mask == NULL) {
		buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
					    sizeof(*buffer->scan_mask),
					    GFP_KERNEL);
		if (buffer->scan_mask == NULL) {
			ret = -ENOMEM;
			goto error_cleanup_dynamic;
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
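
/*
 * Illustrative sketch only (hypothetical driver code, not part of the core):
 * a driver that has already allocated and attached indio_dev->buffer
 * typically registers its scan elements from probe() using the same channel
 * array it hands to the core. The name sketch_probe_buffer is made up.
 */
static int __maybe_unused sketch_probe_buffer(struct iio_dev *indio_dev)
{
	int ret;

	/* indio_dev->buffer is assumed to have been allocated already */
	ret = iio_buffer_register(indio_dev, indio_dev->channels,
				  indio_dev->num_channels);
	if (ret)
		return ret;

	/* ...device specific setup; iio_device_register() follows... */
	return 0;
}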
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note: NULL is used as the error indicator, as a NULL match makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
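
/*
 * Worked example (descriptive only): with a 16-bit channel, a 32-bit channel
 * and a 64-bit timestamp enabled, the scan is laid out as 2 bytes at offset 0,
 * 4 bytes at offset 4 (2 aligned up to 4) and 8 bytes at offset 8, giving a
 * total of 16 bytes per scan.
 */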
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		list_del_init(&buffer->buffer_list);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/* Note can only occur when adding a buffer. */
			list_del(&insert_buffer->buffer_list);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
			} else {
				kfree(compound_mask);
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
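
/*
 * Illustrative sketch only (hypothetical in-kernel consumer, not part of the
 * core): attaching and later detaching an extra buffer is a pair of
 * iio_update_buffers() calls. The name sketch_attach_buffer is made up, and
 * any locking around the calls is left to the caller here.
 */
static int __maybe_unused sketch_attach_buffer(struct iio_dev *indio_dev,
					       struct iio_buffer *new_buf)
{
	int ret;

	ret = iio_update_buffers(indio_dev, new_buf, NULL);	/* insert */
	if (ret)
		return ret;

	/* ...consume data from new_buf... */

	return iio_update_buffers(indio_dev, NULL, new_buf);	/* remove */
}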
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
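
/*
 * Illustrative sketch only (hypothetical driver code): a device that can
 * sample just one channel at a time plugs this helper into its buffer setup
 * ops; the core then rejects multi-bit scan masks via iio_validate_scan_mask()
 * below. The name sketch_buffer_setup_ops is made up.
 */
static const struct iio_buffer_setup_ops sketch_buffer_setup_ops __maybe_unused = {
	.preenable		= &iio_sw_buffer_preenable,
	.validate_scan_mask	= &iio_validate_scan_mask_onehot,
};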
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set a particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);
	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
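
/*
 * Worked example (descriptive only): if the device's active scan carries a
 * 16-bit channel at offset 0 and a 32-bit channel at offset 4, but a given
 * buffer only requested the second channel, its demux_list holds a single
 * entry with from = 4, to = 0, length = 4, so each incoming scan is repacked
 * into the bounce buffer before being stored to that buffer.
 */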
static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
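
/*
 * Illustrative sketch only (hypothetical driver code, not part of the core):
 * a triggered-buffer driver typically fills indio_dev->scan_bytes worth of
 * sample data in its pollfunc bottom half and hands it to every attached
 * buffer via iio_push_to_buffers(). The names sketch_trigger_handler and the
 * fixed 32-byte scratch array are made up, and the usual
 * <linux/interrupt.h> / <linux/iio/trigger_consumer.h> includes are assumed.
 */
static irqreturn_t __maybe_unused sketch_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u8 data[32];	/* assumed large enough for indio_dev->scan_bytes */

	/* ...read the enabled channels (and timestamp) into data here... */

	iio_push_to_buffers(indio_dev, data);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}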
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);