/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer_generic.h"

static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb->access->read_first_n)
                return -EINVAL;
        return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        poll_wait(filp, &rb->pollq, wait);
        if (rb->stufftoread)
                return POLLIN | POLLRDNORM;
        /* need a way of knowing if there may be enough data... */
        return 0;
}

int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        if (!rb)
                return -EINVAL;
        if (rb->access->mark_in_use)
                rb->access->mark_in_use(rb);
        return 0;
}

void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
{
        struct iio_buffer *rb = indio_dev->buffer;

        clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
        if (rb->access->unmark_in_use)
                rb->access->unmark_in_use(rb);
}

void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
        buffer->indio_dev = indio_dev;
        init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

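/*
 * Usage sketch (illustrative only; my_buffer and my_buffer_access are
 * hypothetical): a buffer implementation embeds struct iio_buffer as its
 * first member (as the chrdev read path above requires), then initializes
 * the generic fields before attaching the buffer to the device:
 *
 *      struct my_buffer {
 *              struct iio_buffer buffer;
 *      };
 *
 *      struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *      b->buffer.access = &my_buffer_access;
 *      iio_buffer_init(&b->buffer, indio_dev);
 *      indio_dev->buffer = &b->buffer;
 */
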
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}

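/*
 * For reference, following the format string above: a signed 12-bit
 * little-endian sample stored in 16 bits with a 4-bit right shift reads
 * back as "le:s12/16>>4".
 */
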
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        ret = iio_scan_mask_query(indio_dev->buffer,
                                  to_iio_dev_attr(attr)->address);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        buffer->scan_count--;
        return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret = 0;
        bool state;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret = 0;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}

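/*
 * Usage sketch (the sysfs paths and attribute names are typical, not
 * guaranteed): the per-channel "en" attributes created by the helper below
 * land in the scan_elements directory, so enabling a channel and the
 * timestamp from userspace looks like:
 *
 *      echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *      echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_timestamp_en
 */
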
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                goto error_ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        attrcount++;
        ret = attrcount;
error_ret:
        return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
                                                     struct iio_dev_attr *p)
{
        kfree(p->dev_attr.attr.name);
        kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p, *n;
        struct iio_buffer *buffer = indio_dev->buffer;

        list_for_each_entry_safe(p, n,
                                 &buffer->scan_el_dev_attr_list, l)
                iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
                        const struct iio_chan_spec *channels,
                        int num_channels)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;

        if (buffer->attrs)
                indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        if (channels) {
                /* new magic */
                for (i = 0; i < num_channels; i++) {
                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = indio_dev->channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask
                                = kzalloc(sizeof(*buffer->scan_mask)*
                                          BITS_TO_LONGS(indio_dev->masklength),
                                          GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs
                = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
                          (attrcount + 1),
                          GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
                       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        __iio_buffer_attr_cleanup(indio_dev);

        return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

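/*
 * Example (hypothetical driver code): a driver typically registers its scan
 * elements right after setting up its buffer, passing its channel array:
 *
 *      ret = iio_buffer_register(indio_dev, my_channels,
 *                                ARRAY_SIZE(my_channels));
 *      if (ret)
 *              goto error_free_buffer;
 *
 * where my_channels is the driver's struct iio_chan_spec array and
 * error_free_buffer is an assumed cleanup label.
 */
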
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        __iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_length)
                return sprintf(buf, "%d\n",
                               buffer->access->get_length(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        unsigned long val;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (buffer->access->get_length)
                if (val == buffer->access->get_length(buffer))
                        return len;

        if (buffer->access->set_length) {
                buffer->access->set_length(buffer, val);
                if (buffer->access->mark_param_change)
                        buffer->access->mark_param_change(buffer);
        }

        return len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

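/*
 * Usage sketch (typical sysfs path, not guaranteed): resizing the buffer
 * from userspace while capture is disabled:
 *
 *      echo 128 > /sys/bus/iio/devices/iio:device0/buffer/length
 */
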
ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        if (buffer->access->get_bytes_per_datum)
                return sprintf(buf, "%d\n",
                               buffer->access->get_bytes_per_datum(buffer));

        return 0;
}
EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);

ssize_t iio_buffer_store_enable(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf,
                                size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        mutex_lock(&indio_dev->mlock);
        previous_mode = indio_dev->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-buffer, current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (buffer->setup_ops->preenable) {
                        ret = buffer->setup_ops->preenable(indio_dev);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: buffer preenable failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (buffer->access->mark_in_use)
                        buffer->access->mark_in_use(buffer);
                /* Definitely possible for devices to support both of these.*/
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
                        if (!indio_dev->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                goto error_ret;
                        }
                        indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
                } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
                        indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
                else { /* should never be reached */
                        ret = -EINVAL;
                        goto error_ret;
                }

                if (buffer->setup_ops->postenable) {
                        ret = buffer->setup_ops->postenable(indio_dev);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                if (buffer->access->unmark_in_use)
                                        buffer->access->unmark_in_use(buffer);
                                indio_dev->currentmode = previous_mode;
                                if (buffer->setup_ops->postdisable)
                                        buffer->setup_ops->
                                                postdisable(indio_dev);
                                goto error_ret;
                        }
                }
        } else {
                if (buffer->setup_ops->predisable) {
                        ret = buffer->setup_ops->predisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
                if (buffer->access->unmark_in_use)
                        buffer->access->unmark_in_use(buffer);
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (buffer->setup_ops->postdisable) {
                        ret = buffer->setup_ops->postdisable(indio_dev);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&indio_dev->mlock);
        return len;

error_ret:
        mutex_unlock(&indio_dev->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

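/*
 * Usage sketch (typical sysfs path, not guaranteed): starting and stopping
 * capture through the attribute handled above:
 *
 *      echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
 *      echo 0 > /sys/bus/iio/devices/iio:device0/buffer/enable
 */
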
ssize_t iio_buffer_show_enable(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(indio_dev->currentmode
                                       & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = indio_dev->buffer;
        size_t size;

        dev_dbg(&indio_dev->dev, "%s\n", __func__);
        /* Check if there are any scan elements enabled, if not fail*/
        if (!(buffer->scan_count || buffer->scan_timestamp))
                return -EINVAL;
        if (buffer->scan_timestamp)
                if (buffer->scan_count)
                        /* Timestamp (aligned to s64) and data */
                        size = (((buffer->scan_count * buffer->bpe)
                                 + sizeof(s64) - 1)
                                & ~(sizeof(s64) - 1))
                                + sizeof(s64);
                else /* Timestamp only */
                        size = sizeof(s64);
        else /* Data only */
                size = buffer->scan_count * buffer->bpe;
        buffer->access->set_bytes_per_datum(buffer, size);

        return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

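/*
 * Worked example of the size calculation above (illustrative numbers):
 * with scan_count = 3 and bpe = 2 the sample data occupies 6 bytes;
 * rounding up to the 8-byte alignment of s64 gives 8, and the trailing
 * timestamp adds another 8, so set_bytes_per_datum() is called with 16.
 */
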
/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
                                          unsigned int masklength,
                                          unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}

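/*
 * Illustration (hypothetical masks, masklength <= BITS_PER_LONG): with the
 * zero-terminated list { 0x3, 0xf, 0xff, 0x0 }, a requested mask of 0x5 is
 * not a subset of 0x3 but is a subset of 0xf, so the 0xf entry is returned;
 * a requested mask of 0x100 matches nothing and the walk stops at the zero
 * terminator, returning NULL.
 */
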
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *indio_dev = buffer->indio_dev;
        unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask)*
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);

        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN_ON("trying to set scanmask prior to registering buffer\n");
                kfree(trialmask);
                return -EINVAL;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask) {
                        kfree(trialmask);
                        return -EINVAL;
                }
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
        buffer->scan_count++;

        kfree(trialmask);

        return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
{
        struct iio_dev *indio_dev = buffer->indio_dev;
        unsigned long *mask;

        if (bit > indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;
        if (indio_dev->available_scan_masks)
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           buffer->scan_mask);
        else
                mask = buffer->scan_mask;
        if (!mask)
                return 0;

        return test_bit(bit, mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);