/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer.h"

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}
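
/*
 * Example (illustrative only, not part of this file): the requirement
 * noted above is met by buffer implementations embedding the iio_buffer
 * first, along the lines of this hypothetical ring buffer:
 *
 *      struct my_ring_buffer {
 *              struct iio_buffer buffer;       / * must be first * /
 *              u8 *data;
 *              unsigned int write_pos;
 *      };
 */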

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}
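
/*
 * Example (hypothetical userspace sketch, not part of the kernel API
 * defined here): a consumer typically blocks in poll() on the character
 * device and then drains whole scans:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      poll(&pfd, 1, -1);
 *      read(fd, scan, scan_bytes);     (scan_bytes per bytes_per_datum)
 */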

void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
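
/*
 * Worked example (illustrative): a signed little-endian channel with 12
 * real bits stored in 16 bits and shifted down by 4 reads back as
 * "le:s12/16>>4".
 */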

static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        ret = test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}
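
/*
 * Example (hypothetical sysfs path and channel name): a scan element is
 * enabled from userspace while the buffer is off, e.g.
 *
 *      echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 */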

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                buffer->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                          sizeof(*buffer->scan_mask),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kcalloc(attrcount + 1,
                          sizeof(buffer->scan_el_group.attrs[0]),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs,
                       buffer->scan_el_attrs->attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
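
/*
 * Example (a minimal sketch; the channel array name is hypothetical):
 * a driver registers its channels once the buffer has been allocated:
 *
 *      ret = iio_buffer_register(indio_dev,
 *                                my_adc_channels,
 *                                ARRAY_SIZE(my_adc_channels));
 *      if (ret)
 *              goto error_free_buffer;
 */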

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        ulong val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
                ret = -EBUSY;
        } else {
                if (buffer->access->set_length)
                        buffer->access->set_length(buffer, val);
                ret = 0;
        }
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        int previous_mode;
        bool requested_state, current_state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        mutex_lock(&indio_dev->mlock);
        previous_mode = indio_dev->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = iio_buffer_enabled(indio_dev);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (indio_dev->setup_ops->preenable) {
                        ret = indio_dev->setup_ops->preenable(indio_dev);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                /* Definitely possible for devices to support both of these. */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!indio_dev->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                goto error_ret;
                        }
                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (indio_dev->setup_ops->postenable) {
                        ret = indio_dev->setup_ops->postenable(indio_dev);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                indio_dev->currentmode = previous_mode;
                                if (indio_dev->setup_ops->postdisable)
                                        indio_dev->setup_ops->
                                                postdisable(indio_dev);
                                goto error_ret;
                        }
                }
        } else {
                if (indio_dev->setup_ops->predisable) {
                        ret = indio_dev->setup_ops->predisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->setup_ops->postdisable) {
                        ret = indio_dev->setup_ops->postdisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&indio_dev->mlock);
        return len;

error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
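
/*
 * Example (hypothetical sysfs path): userspace flips the whole buffer on
 * or off through this attribute once scan elements are selected:
 *
 *      echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 */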

ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, as it can never be a valid
 * match here. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
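
/*
 * Worked example (illustrative): with available masks { 0x3, 0xf } and a
 * requested mask of 0x2, the loop returns the first entry (0x3), since
 * 0x2 is a subset of 0x3; a request for 0x5 would skip 0x3 and match 0xf
 * instead.
 */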

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, buffer->scan_mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (buffer->scan_timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              buffer->scan_index_timestamp);
                length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        buffer->access->set_bytes_per_datum(buffer, bytes);

        /* What scan mask do we actually have? */
        if (indio_dev->available_scan_masks)
                indio_dev->active_scan_mask =
                        iio_scan_mask_match(indio_dev->available_scan_masks,
                                            indio_dev->masklength,
                                            buffer->scan_mask);
        else
                indio_dev->active_scan_mask = buffer->scan_mask;
        iio_update_demux(indio_dev);

        if (indio_dev->info->update_scan_mode)
                return indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
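
/*
 * Worked example (illustrative): one enabled 16-bit channel
 * (storagebits = 16, so length = 2) plus a 64-bit timestamp gives
 * bytes = ALIGN(0, 2) + 2 = 2 for the sample, then ALIGN(2, 8) = 8 for
 * the timestamp slot, so set_bytes_per_datum() is called with 8 + 8 = 16.
 */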

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device owning the buffer
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
                      struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit > indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
                                unsigned char *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}
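
/*
 * Worked example (illustrative): if the device captures channels 0 and 1
 * as 16-bit samples but only channel 1 is wanted, a single table entry of
 * { .from = 2, .to = 0, .length = 2 } copies the second sample to the
 * front of demux_bounce, dropping channel 0 from the outgoing scan.
 */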

int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
                       s64 timestamp)
{
        unsigned char *dataout = iio_demux(buffer, data);

        return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);
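
/*
 * Example (a minimal sketch; the handler and read helper are
 * hypothetical): a driver's trigger handler builds one scan and pushes
 * it with the capture timestamp:
 *
 *      static irqreturn_t my_adc_trigger_handler(int irq, void *p)
 *      {
 *              struct iio_poll_func *pf = p;
 *              struct iio_dev *indio_dev = pf->indio_dev;
 *              u8 scan[16];
 *
 *              my_adc_read_scan(indio_dev, scan);
 *              iio_push_to_buffer(indio_dev->buffer, scan, pf->timestamp);
 *              iio_trigger_notify_done(indio_dev->trig);
 *              return IRQ_HANDLED;
 *      }
 */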

int iio_update_demux(struct iio_dev *indio_dev)
{
        const struct iio_chan_spec *ch;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p, *q;

        /* Clear out any old demux */
        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        length = ch->scan_type.storagebits / 8;
                        /* Make sure we are aligned */
                        in_loc += length;
                        if (in_loc % length)
                                in_loc += length - in_loc % length;
                }
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                length = ch->scan_type.storagebits / 8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto error_clear_mux_table;
                }
                ch = iio_find_channel_from_si(indio_dev,
                                              buffer->scan_index_timestamp);
                length = ch->scan_type.storagebits / 8;
                if (out_loc % length)
                        out_loc += length - out_loc % length;
                if (in_loc % length)
                        in_loc += length - in_loc % length;
                p->from = in_loc;
                p->to = out_loc;
                p->length = length;
                list_add_tail(&p->l, &buffer->demux_list);
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);