// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}

	return ret;
}
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

	if (!maps)
		return 0;

	guard(mutex)(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
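
/*
 * Illustrative example (not from the original file; all names below are
 * hypothetical): a board or platform driver could register a consumer map
 * like this. "VIN0" would have to match a channel's datasheet_name in the
 * ADC driver, and the empty sentinel entry (consumer_dev_name == NULL)
 * terminates the registration loop above.
 *
 *	static struct iio_map board_adc_maps[] = {
 *		{
 *			.consumer_dev_name = "battery-monitor",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "VIN0",
 *		},
 *		{ }
 *	};
 *
 *	err = iio_map_array_register(indio_dev, board_adc_maps);
 */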
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}

	return chan;
}
/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
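
/*
 * Illustrative devicetree fragment (an assumption for documentation; node
 * names are made up): with the simple xlate above, the single cell in
 * "io-channels" is taken directly as the channel index, so <&adc 2> resolves
 * to indio_dev->channels[2].
 *
 *	adc: adc@48 {
 *		compatible = "vendor,some-adc";
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 2>;
 *	};
 */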
static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}
static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}
static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not proceed
			 * with any other lookup. Hence, explicitly return -EINVAL
			 * (maybe not the best error code) so that the caller
			 * won't do a system lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * if !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}
struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has a "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
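
/*
 * Illustrative sketch (made-up node and channel names): named lookup resolves
 * through "io-channel-names", so with the fragment below
 * fwnode_iio_channel_get_by_name(fwnode, "temp") matches index 1 in the name
 * list and returns the channel behind <&adc 3>.
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 3>;
 *		io-channel-names = "vbat", "temp";
 *	};
 */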
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return_ptr(channel);

error_no_chan:
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
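
/*
 * Minimal consumer sketch (illustrative only, with assumed names; not part
 * of the original file): grab a named channel in probe and read a processed
 * value. The devm action registered above releases the channel automatically
 * when the consumer driver detaches.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_channel *chan;
 *		int mv, ret;
 *
 *		chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *		if (IS_ERR(chan))
 *			return PTR_ERR(chan);
 *
 *		ret = iio_read_channel_processed(chan, &mv);
 *		if (ret < 0)
 *			return ret;
 *		...
 *	}
 */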
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}

	return_ptr(chans);

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
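
/*
 * Usage sketch (illustrative, not from the original file): the returned
 * array is NULL-terminated via the indio_dev member (note the kcalloc of
 * nummaps + 1 above), so a consumer can walk it without a separate count,
 * mirroring what iio_channel_release_all() does below.
 *
 *	struct iio_channel *chans, *c;
 *
 *	chans = iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (c = chans; c->indio_dev; c++)
 *		... use c ...
 *	iio_channel_release_all(chans);
 */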
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_raw_multi) {
		ret = iio_info->read_raw_multi(chan->indio_dev,
					       chan->channel,
					       INDIO_MAX_RAW_ELEMENTS,
					       vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else if (iio_info->read_raw) {
		ret = iio_info->read_raw(chan->indio_dev,
					 chan->channel, val, val2, info);
	} else {
		return -EINVAL;
	}

	return ret;
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						     scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
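
/*
 * Worked example (illustrative, hypothetical numbers): a driver for a 12-bit
 * ADC with a 2500 mV reference typically reports its scale as
 * IIO_VAL_FRACTIONAL with scale_val = 2500 and scale_val2 = 4096. For
 * raw = 100, no offset, and a consumer scale of 1, the IIO_VAL_FRACTIONAL
 * branch above computes:
 *
 *	*processed = div_s64(100 * 2500 * 1, 4096) = 61		(mV, truncated)
 */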
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;
		*val *= scale;

		return ret;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			return ret;

		return iio_convert_raw_to_processed_unlocked(chan, *val, val,
							     scale);
	}
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_avail)
		return iio_info->read_avail(chan->indio_dev, chan->channel,
					    vals, type, length, info);
	return -EINVAL;
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
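
/*
 * Usage sketch (illustrative, not from the original file): on success the
 * return value is the availability type (IIO_AVAIL_LIST or IIO_AVAIL_RANGE),
 * so a consumer can, for example, scan a list for its largest raw value:
 *
 *	const int *raw_vals;
 *	int i, n, max_raw = INT_MIN;
 *	int ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &raw_vals, &n);
 *	if (ret == IIO_AVAIL_LIST)
 *		for (i = 0; i < n; i++)
 *			max_raw = max(max_raw, raw_vals[i]);
 */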
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	*type = chan->channel->type;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (iio_info->write_raw)
		return iio_info->write_raw(chan->indio_dev,
					   chan->channel, val, val2, info);
	return -EINVAL;
}
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);