// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
/* An entry in the consumer map table, tying one iio_map to its producer */
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
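
/*
 * Example (an illustrative sketch only; the device and channel names
 * below are hypothetical): a producer driver publishes its channels for
 * a named consumer through the map table, typically at probe time.
 *
 *	static struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "my-consumer",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "CH0",
 *		},
 *		{ },
 *	};
 *
 *	err = iio_map_array_register(indio_dev, adc_maps);
 *
 * The empty sentinel entry terminates the array (a NULL
 * consumer_dev_name stops the registration loop above), and
 * iio_map_array_unregister(indio_dev) removes the entries again at
 * remove time.
 */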
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF
static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}
/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common
 * 1:1 mapped channels in IIO chips. This function performs only one
 * sanity check: whether the IIO index is less than num_channels (that is
 * specified in the iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
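
/*
 * Example device-tree usage (illustrative; the node names are
 * hypothetical). With "#io-channel-cells" of 1, the single specifier
 * cell is the channel index that the default translation above returns:
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vbat", "temp";
 *	};
 */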
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}
static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
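
/*
 * Example (illustrative, hypothetical nodes): "io-channel-ranges" on a
 * parent lets its children resolve named channels through it, which is
 * the walk-up that the loop above performs:
 *
 *	consumer {
 *		io-channels = <&adc 0>;
 *		io-channel-names = "vbat";
 *		io-channel-ranges;
 *
 *		child {
 *			(no io-channels here; "vbat" resolves via the parent)
 *		};
 *	};
 */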
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}
#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
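
/*
 * Example consumer usage (illustrative sketch; the channel name "vbat"
 * and the surrounding driver are hypothetical). Device-tree lookup is
 * tried first, then the map table registered by a producer driver:
 *
 *	struct iio_channel *chan;
 *	int val, err;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	err = iio_read_channel_processed(chan, &val);
 *
 *	iio_channel_release(chan);
 */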
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}
static int devm_iio_channel_match(struct device *dev, void *res, void *data)
{
	struct iio_channel **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}
struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
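
/*
 * Example (illustrative): the devres variant ties the channel's lifetime
 * to the consumer device, so no explicit release is needed on the error
 * or remove paths:
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */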
void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free,
			       devm_iio_channel_match, channel));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
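
/*
 * Example (illustrative): iio_channel_get_all() returns a
 * NULL-terminated array, so a consumer can walk it without a separate
 * count, exactly as the release loop above does:
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(&pdev->dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *
 *	for (chan = chans; chan->indio_dev; chan++)
 *		dev_info(&pdev->dev, "got channel of type %d\n",
 *			 chan->channel->type);
 *
 *	iio_channel_release_all(chans);
 */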
static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
void devm_iio_channel_release_all(struct device *dev,
				  struct iio_channel *channels)
{
	WARN_ON(devres_release(dev, devm_iio_channel_free_all,
			       devm_iio_channel_match, channels));
}
EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);

	return ret;
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * Just pass raw values as processed if no scaling is
		 * available.
		 */
		*processed = raw;
		return 0;
	}

	/* the consumer's scale multiplier applies to every scale type */
	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
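
/*
 * Worked example (numbers invented for illustration): for a raw reading
 * of 100 with offset 2, an IIO_VAL_INT_PLUS_MICRO scale of 1.5 (i.e.
 * scale_val = 1, scale_val2 = 500000) and a consumer scale of 1000
 * (milli-units):
 *
 *	processed = (100 + 2) * 1 * 1000
 *		  + div_s64(102 * 500000 * 1000, 1000000)
 *		  = 102000 + 51000 = 153000
 *
 * i.e. 153.0 in milli-units, matching (raw + offset) * 1.5 * 1000.
 */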
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2,
					  IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
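
/*
 * Example (illustrative): when a driver only exposes raw values, the
 * helpers above combine to the same result as a processed read:
 *
 *	int raw, uv;
 *
 *	if (iio_read_channel_raw(chan, &raw) >= 0)
 *		iio_convert_raw_to_processed(chan, raw, &uv, 1000);
 *
 * With scale = 1000 the result is in thousandths of the channel's base
 * unit, e.g. microvolts for a voltage channel whose processed base unit
 * is millivolts.
 */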
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2,
					  IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
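
/*
 * Example (illustrative): listing the discrete raw values a channel can
 * take, e.g. the gain settings of a hypothetical ADC:
 *
 *	const int *vals;
 *	int i, length;
 *
 *	if (iio_read_avail_channel_raw(chan, &vals, &length) == IIO_AVAIL_LIST)
 *		for (i = 0; i < length; i++)
 *			pr_info("raw value %d available\n", vals[i]);
 */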
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
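
/*
 * Example (illustrative): driving a hypothetical DAC channel that was
 * obtained with iio_channel_get(), e.g. from a regulator or backlight
 * consumer driver:
 *
 *	err = iio_write_channel_raw(dac_chan, 2048);
 *	if (err < 0)
 *		dev_warn(dev, "failed to set DAC output\n");
 */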
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);