/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
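
/*
 * Illustrative sketch (hypothetical driver, not part of this file): an ADC
 * provider "foo" exposing two channels to board code. Each map entry ties a
 * provider channel's datasheet_name to a consumer device/channel pair; the
 * IIO_MAP() initializer comes from <linux/iio/machine.h>.
 *
 *	static struct iio_map foo_maps[] = {
 *		IIO_MAP("adc0", "bar-battery", "vbat"),
 *		IIO_MAP("adc3", "bar-thermal", "temp"),
 *		{ },
 *	};
 *
 *	(in foo_probe(), after iio_device_register())
 *	ret = iio_map_array_register(indio_dev, foo_maps);
 *
 * The array must end with an entry whose consumer_dev_name is NULL, since
 * iio_map_array_register() uses that as its stop condition.
 */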

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case
 * of 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * that the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
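
/*
 * Illustrative device-tree sketch (hypothetical nodes, not from this file):
 * a provider with #io-channel-cells = <1> uses the single cell as the
 * channel index that __of_iio_simple_xlate() range-checks above.
 *
 *	adc: adc@48 {
 *		compatible = "vendor,some-adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vbat", "temp";
 *	};
 */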

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np,
					      int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
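
/*
 * Illustrative consumer sketch (hypothetical driver, not part of this
 * file): grab the "vbat" channel mapped to this device, read one raw
 * sample, and release the channel again.
 *
 *	struct iio_channel *chan;
 *	int raw, ret;
 *
 *	chan = iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_raw(chan, &raw);
 *	iio_channel_release(chan);
 *
 * Note that PTR_ERR(chan) may be -EPROBE_DEFER if the provider has not
 * probed yet, so the error should be propagated rather than swallowed.
 */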

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

static int devm_iio_channel_match(struct device *dev, void *res, void *data)
{
	struct iio_channel **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
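
/*
 * Illustrative sketch (hypothetical driver): with the devm_ variant the
 * channel is released automatically when the consumer device unbinds, so
 * no explicit iio_channel_release() is needed on error paths.
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */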

void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free,
			       devm_iio_channel_match, channel));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
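
/*
 * Illustrative sketch (hypothetical consumer): the returned array is
 * NULL-terminated via the indio_dev member, so it can be walked without a
 * separate count.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(&pdev->dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		dev_info(&pdev->dev, "got channel of type %d\n",
 *			 chan->channel->type);
 *	iio_channel_release_all(chans);
 */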

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

void devm_iio_channel_release_all(struct device *dev,
				  struct iio_channel *channels)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free_all,
			       devm_iio_channel_match, channels));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * Just pass raw values as processed if no scaling is
		 * available.
		 */
		*processed = raw;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
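
/*
 * Worked example (hypothetical numbers): a 12-bit ADC with a 1200 mV
 * reference typically reports an IIO_VAL_FRACTIONAL scale with
 * scale_val = 1200 and scale_val2 = 4096. For raw = 2048 and scale = 1
 * (i.e. millivolts):
 *
 *	processed = 2048 * 1200 / 4096 = 600 mV
 *
 * Passing scale = 1000 instead yields microvolts: 600000 uV.
 */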

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
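
/*
 * Illustrative sketch (hypothetical consumer): prefer the processed
 * helper, which transparently applies offset and scale to a raw reading
 * when the driver does not provide IIO_CHAN_INFO_PROCESSED directly.
 *
 *	int mv;
 *
 *	ret = iio_read_channel_processed(chan, &mv);
 *	if (ret < 0)
 *		return ret;
 */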

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan,
				     vals, &type, length, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
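
/*
 * Illustrative sketch (hypothetical consumer): the return value
 * distinguishes a list of discrete values (IIO_AVAIL_LIST) from a
 * min/step/max triplet (IIO_AVAIL_RANGE); both come back through the
 * same vals array.
 *
 *	const int *vals;
 *	int i, length;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &length);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < length; i++)
 *			pr_debug("raw value %d available\n", vals[i]);
 */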

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
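
/*
 * Illustrative sketch (hypothetical consumer): writing is the mirror of
 * the raw read path, e.g. setting a DAC output channel.
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 *	if (ret < 0)
 *		return ret;
 */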

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);