// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	const struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, const struct iio_map *maps)
{
	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

	if (!maps)
		return 0;

	guard(mutex)(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				const struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
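
/*
 * Example (illustrative sketch, with made-up device and channel names):
 * an ADC driver exposing one channel to a "hwmon-consumer" device via a
 * NULL-terminated map array, registered from its probe() after
 * iio_device_register(). The while loop above relies on the empty
 * terminating entry.
 *
 *	static const struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "hwmon-consumer",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "CH0",
 *		},
 *		{ }
 *	};
 *
 *	ret = devm_iio_map_array_register(dev, indio_dev, adc_maps);
 *	if (ret)
 *		return ret;
 */
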
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common 1:1
 * mapped channels in IIO chips. This function performs only one sanity check:
 * whether the IIO index is less than num_channels (that is specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not proceed
			 * with any other lookup. Hence, explicitly return -EINVAL
			 * (maybe not the best error code) so that the caller
			 * won't do a system-wide map lookup.
			 */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case), which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
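
/*
 * Example devicetree fragment (hypothetical nodes, for illustration only)
 * showing the properties the lookup above consumes. "io-channels" and
 * "io-channel-names" live on the consumer node; "io-channel-ranges" on a
 * parent lets child nodes inherit the parent's channels:
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	battery {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vbat", "temp";
 *	};
 */
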
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}

	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return_ptr(channel);

error_no_chan:
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
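
/*
 * Example (illustrative sketch; the "vbat" channel name is hypothetical):
 * a consumer driver grabbing one mapped channel, reading it once and
 * releasing it again.
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_raw(chan, &val);
 *	iio_channel_release(chan);
 */
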
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
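
/*
 * Example (illustrative sketch; channel name hypothetical): the managed
 * variant ties the channel's lifetime to the consumer device, so probe()
 * needs no matching release call.
 *
 *	chan = devm_iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */
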
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}

	return_ptr(chans);

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
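
/*
 * Example (illustrative sketch): walking the NULL-terminated array
 * returned above. The terminating element has indio_dev == NULL, which
 * is also what iio_channel_release_all() keys off.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		dev_info(dev, "got channel of type %d\n", chan->channel->type);
 *	iio_channel_release_all(chans);
 */
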
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_raw_multi) {
		ret = iio_info->read_raw_multi(chan->indio_dev,
					       chan->channel,
					       INDIO_MAX_RAW_ELEMENTS,
					       vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else if (iio_info->read_raw) {
		ret = iio_info->read_raw(chan->indio_dev,
					 chan->channel, val, val2, info);
	} else {
		return -EINVAL;
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						     scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
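
/*
 * Worked example (values hypothetical): a 12-bit ADC reporting an
 * IIO_VAL_FRACTIONAL scale with scale_val = 1800 and scale_val2 = 4096,
 * i.e. 1800/4096 mV per LSB. For raw = 2048 and consumer scale = 1, the
 * IIO_VAL_FRACTIONAL branch above computes:
 *
 *	processed = div_s64(2048 * 1800 * 1, 4096) = 900
 *
 * i.e. 900 mV, in the same unit the channel's scale is expressed in.
 */
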
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;
		*val *= scale;

		return ret;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			return ret;

		return iio_convert_raw_to_processed_unlocked(chan, *val, val,
							     scale);
	}
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
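
/*
 * Example (illustrative sketch; assumes a voltage channel whose processed
 * unit is millivolts): scale the result to microvolts while reading.
 *
 *	int uv, ret;
 *
 *	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 *	if (ret < 0)
 *		return ret;
 *	(uv now holds microvolts: millivolts times the consumer scale 1000)
 */
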
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_avail)
		return iio_info->read_avail(chan->indio_dev, chan->channel,
					    vals, type, length, info);
	return -EINVAL;
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
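
/*
 * Example (illustrative sketch): enumerate the raw values a channel can
 * report. On success the return value is the availability type; for
 * IIO_AVAIL_RANGE the three values are min, step and max instead of a
 * full list.
 *
 *	const int *vals;
 *	int i, length, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &length);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < length; i++)
 *			pr_debug("available raw value: %d\n", vals[i]);
 */
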
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	*type = chan->channel->type;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
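
/*
 * Example (illustrative sketch): a consumer that only accepts voltage
 * channels can verify the type up front.
 *
 *	enum iio_chan_type type;
 *
 *	ret = iio_get_channel_type(chan, &type);
 *	if (ret < 0)
 *		return ret;
 *	if (type != IIO_VOLTAGE)
 *		return -EINVAL;
 */
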
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (iio_info->write_raw)
		return iio_info->write_raw(chan->indio_dev,
					   chan->channel, val, val2, info);
	return -EINVAL;
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
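
/*
 * Example (illustrative sketch; assumes "chan" resolves to a DAC output
 * channel): set the output to a raw code.
 *
 *	ret = iio_write_channel_raw(chan, 2048);
 *	if (ret < 0)
 *		return ret;
 */
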
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
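
/*
 * Example (illustrative sketch; the "powerdown" attribute name is
 * hypothetical and device specific): ext_info attributes follow the sysfs
 * string convention, so values are passed as text.
 *
 *	ret = iio_write_channel_ext_info(chan, "powerdown", "1\n", 2);
 *	if (ret < 0)
 *		return ret;
 */
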
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);