/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
21 struct iio_map_internal
{
22 struct iio_dev
*indio_dev
;
/* Global list of registered consumer channel maps, guarded by the mutex. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
30 int iio_map_array_register(struct iio_dev
*indio_dev
, struct iio_map
*maps
)
33 struct iio_map_internal
*mapi
;
38 mutex_lock(&iio_map_list_lock
);
39 while (maps
[i
].consumer_dev_name
!= NULL
) {
40 mapi
= kzalloc(sizeof(*mapi
), GFP_KERNEL
);
46 mapi
->indio_dev
= indio_dev
;
47 list_add(&mapi
->l
, &iio_map_list
);
51 mutex_unlock(&iio_map_list_lock
);
55 EXPORT_SYMBOL_GPL(iio_map_array_register
);
59 * Remove all map entries associated with the given iio device
61 int iio_map_array_unregister(struct iio_dev
*indio_dev
)
64 struct iio_map_internal
*mapi
;
65 struct list_head
*pos
, *tmp
;
67 mutex_lock(&iio_map_list_lock
);
68 list_for_each_safe(pos
, tmp
, &iio_map_list
) {
69 mapi
= list_entry(pos
, struct iio_map_internal
, l
);
70 if (indio_dev
== mapi
->indio_dev
) {
76 mutex_unlock(&iio_map_list_lock
);
79 EXPORT_SYMBOL_GPL(iio_map_array_unregister
);
81 static const struct iio_chan_spec
82 *iio_chan_spec_from_name(const struct iio_dev
*indio_dev
, const char *name
)
85 const struct iio_chan_spec
*chan
= NULL
;
87 for (i
= 0; i
< indio_dev
->num_channels
; i
++)
88 if (indio_dev
->channels
[i
].datasheet_name
&&
89 strcmp(name
, indio_dev
->channels
[i
].datasheet_name
) == 0) {
90 chan
= &indio_dev
->channels
[i
];
98 static int iio_dev_node_match(struct device
*dev
, void *data
)
100 return dev
->of_node
== data
&& dev
->type
== &iio_device_type
;
103 static int __of_iio_channel_get(struct iio_channel
*channel
,
104 struct device_node
*np
, int index
)
107 struct iio_dev
*indio_dev
;
109 struct of_phandle_args iiospec
;
111 err
= of_parse_phandle_with_args(np
, "io-channels",
117 idev
= bus_find_device(&iio_bus_type
, NULL
, iiospec
.np
,
119 of_node_put(iiospec
.np
);
121 return -EPROBE_DEFER
;
123 indio_dev
= dev_to_iio_dev(idev
);
124 channel
->indio_dev
= indio_dev
;
125 index
= iiospec
.args_count
? iiospec
.args
[0] : 0;
126 if (index
>= indio_dev
->num_channels
) {
130 channel
->channel
= &indio_dev
->channels
[index
];
135 iio_device_put(indio_dev
);
139 static struct iio_channel
*of_iio_channel_get(struct device_node
*np
, int index
)
141 struct iio_channel
*channel
;
145 return ERR_PTR(-EINVAL
);
147 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
149 return ERR_PTR(-ENOMEM
);
151 err
= __of_iio_channel_get(channel
, np
, index
);
153 goto err_free_channel
;
162 static struct iio_channel
*of_iio_channel_get_by_name(struct device_node
*np
,
165 struct iio_channel
*chan
= NULL
;
167 /* Walk up the tree of devices looking for a matching iio channel */
172 * For named iio channels, first look up the name in the
173 * "io-channel-names" property. If it cannot be found, the
174 * index will be an error code, and of_iio_channel_get()
178 index
= of_property_match_string(np
, "io-channel-names",
180 chan
= of_iio_channel_get(np
, index
);
183 else if (name
&& index
>= 0) {
184 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
185 np
->full_name
, name
? name
: "", index
);
190 * No matching IIO channel found on this node.
191 * If the parent node has a "io-channel-ranges" property,
192 * then we can try one of its channels.
195 if (np
&& !of_get_property(np
, "io-channel-ranges", NULL
))
201 static struct iio_channel
*of_iio_channel_get_all(struct device
*dev
)
203 struct iio_channel
*chans
;
204 int i
, mapind
, nummaps
= 0;
208 ret
= of_parse_phandle_with_args(dev
->of_node
,
216 if (nummaps
== 0) /* no error, return NULL to search map table */
219 /* NULL terminated array to save passing size */
220 chans
= kcalloc(nummaps
+ 1, sizeof(*chans
), GFP_KERNEL
);
222 return ERR_PTR(-ENOMEM
);
224 /* Search for OF matches */
225 for (mapind
= 0; mapind
< nummaps
; mapind
++) {
226 ret
= __of_iio_channel_get(&chans
[mapind
], dev
->of_node
,
229 goto error_free_chans
;
234 for (i
= 0; i
< mapind
; i
++)
235 iio_device_put(chans
[i
].indio_dev
);
240 #else /* CONFIG_OF */
242 static inline struct iio_channel
*
243 of_iio_channel_get_by_name(struct device_node
*np
, const char *name
)
248 static inline struct iio_channel
*of_iio_channel_get_all(struct device
*dev
)
253 #endif /* CONFIG_OF */
255 static struct iio_channel
*iio_channel_get_sys(const char *name
,
256 const char *channel_name
)
258 struct iio_map_internal
*c_i
= NULL
, *c
= NULL
;
259 struct iio_channel
*channel
;
262 if (name
== NULL
&& channel_name
== NULL
)
263 return ERR_PTR(-ENODEV
);
265 /* first find matching entry the channel map */
266 mutex_lock(&iio_map_list_lock
);
267 list_for_each_entry(c_i
, &iio_map_list
, l
) {
268 if ((name
&& strcmp(name
, c_i
->map
->consumer_dev_name
) != 0) ||
270 strcmp(channel_name
, c_i
->map
->consumer_channel
) != 0))
273 iio_device_get(c
->indio_dev
);
276 mutex_unlock(&iio_map_list_lock
);
278 return ERR_PTR(-ENODEV
);
280 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
281 if (channel
== NULL
) {
286 channel
->indio_dev
= c
->indio_dev
;
288 if (c
->map
->adc_channel_label
) {
290 iio_chan_spec_from_name(channel
->indio_dev
,
291 c
->map
->adc_channel_label
);
293 if (channel
->channel
== NULL
) {
304 iio_device_put(c
->indio_dev
);
308 struct iio_channel
*iio_channel_get(struct device
*dev
,
309 const char *channel_name
)
311 const char *name
= dev
? dev_name(dev
) : NULL
;
312 struct iio_channel
*channel
;
315 channel
= of_iio_channel_get_by_name(dev
->of_node
,
320 return iio_channel_get_sys(name
, channel_name
);
322 EXPORT_SYMBOL_GPL(iio_channel_get
);
324 void iio_channel_release(struct iio_channel
*channel
)
326 iio_device_put(channel
->indio_dev
);
329 EXPORT_SYMBOL_GPL(iio_channel_release
);
331 struct iio_channel
*iio_channel_get_all(struct device
*dev
)
334 struct iio_channel
*chans
;
335 struct iio_map_internal
*c
= NULL
;
341 return ERR_PTR(-EINVAL
);
343 chans
= of_iio_channel_get_all(dev
);
347 name
= dev_name(dev
);
349 mutex_lock(&iio_map_list_lock
);
350 /* first count the matching maps */
351 list_for_each_entry(c
, &iio_map_list
, l
)
352 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
362 /* NULL terminated array to save passing size */
363 chans
= kzalloc(sizeof(*chans
)*(nummaps
+ 1), GFP_KERNEL
);
369 /* for each map fill in the chans element */
370 list_for_each_entry(c
, &iio_map_list
, l
) {
371 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
373 chans
[mapind
].indio_dev
= c
->indio_dev
;
374 chans
[mapind
].data
= c
->map
->consumer_data
;
375 chans
[mapind
].channel
=
376 iio_chan_spec_from_name(chans
[mapind
].indio_dev
,
377 c
->map
->adc_channel_label
);
378 if (chans
[mapind
].channel
== NULL
) {
380 goto error_free_chans
;
382 iio_device_get(chans
[mapind
].indio_dev
);
387 goto error_free_chans
;
389 mutex_unlock(&iio_map_list_lock
);
394 for (i
= 0; i
< nummaps
; i
++)
395 iio_device_put(chans
[i
].indio_dev
);
398 mutex_unlock(&iio_map_list_lock
);
402 EXPORT_SYMBOL_GPL(iio_channel_get_all
);
404 void iio_channel_release_all(struct iio_channel
*channels
)
406 struct iio_channel
*chan
= &channels
[0];
408 while (chan
->indio_dev
) {
409 iio_device_put(chan
->indio_dev
);
414 EXPORT_SYMBOL_GPL(iio_channel_release_all
);
416 static int iio_channel_read(struct iio_channel
*chan
, int *val
, int *val2
,
417 enum iio_chan_info_enum info
)
424 return chan
->indio_dev
->info
->read_raw(chan
->indio_dev
, chan
->channel
,
428 int iio_read_channel_raw(struct iio_channel
*chan
, int *val
)
432 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
433 if (chan
->indio_dev
->info
== NULL
) {
438 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
440 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
444 EXPORT_SYMBOL_GPL(iio_read_channel_raw
);
446 static int iio_convert_raw_to_processed_unlocked(struct iio_channel
*chan
,
447 int raw
, int *processed
, unsigned int scale
)
449 int scale_type
, scale_val
, scale_val2
, offset
;
453 ret
= iio_channel_read(chan
, &offset
, NULL
, IIO_CHAN_INFO_OFFSET
);
457 scale_type
= iio_channel_read(chan
, &scale_val
, &scale_val2
,
458 IIO_CHAN_INFO_SCALE
);
462 switch (scale_type
) {
464 *processed
= raw64
* scale_val
;
466 case IIO_VAL_INT_PLUS_MICRO
:
468 *processed
= -raw64
* scale_val
;
470 *processed
= raw64
* scale_val
;
471 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
474 case IIO_VAL_INT_PLUS_NANO
:
476 *processed
= -raw64
* scale_val
;
478 *processed
= raw64
* scale_val
;
479 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
482 case IIO_VAL_FRACTIONAL
:
483 *processed
= div_s64(raw64
* (s64
)scale_val
* scale
,
486 case IIO_VAL_FRACTIONAL_LOG2
:
487 *processed
= (raw64
* (s64
)scale_val
* scale
) >> scale_val2
;
496 int iio_convert_raw_to_processed(struct iio_channel
*chan
, int raw
,
497 int *processed
, unsigned int scale
)
501 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
502 if (chan
->indio_dev
->info
== NULL
) {
507 ret
= iio_convert_raw_to_processed_unlocked(chan
, raw
, processed
,
510 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
514 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed
);
516 int iio_read_channel_processed(struct iio_channel
*chan
, int *val
)
520 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
521 if (chan
->indio_dev
->info
== NULL
) {
526 if (iio_channel_has_info(chan
->channel
, IIO_CHAN_INFO_PROCESSED
)) {
527 ret
= iio_channel_read(chan
, val
, NULL
,
528 IIO_CHAN_INFO_PROCESSED
);
530 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
533 ret
= iio_convert_raw_to_processed_unlocked(chan
, *val
, val
, 1);
537 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
541 EXPORT_SYMBOL_GPL(iio_read_channel_processed
);
543 int iio_read_channel_scale(struct iio_channel
*chan
, int *val
, int *val2
)
547 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
548 if (chan
->indio_dev
->info
== NULL
) {
553 ret
= iio_channel_read(chan
, val
, val2
, IIO_CHAN_INFO_SCALE
);
555 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
559 EXPORT_SYMBOL_GPL(iio_read_channel_scale
);
561 int iio_get_channel_type(struct iio_channel
*chan
, enum iio_chan_type
*type
)
564 /* Need to verify underlying driver has not gone away */
566 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
567 if (chan
->indio_dev
->info
== NULL
) {
572 *type
= chan
->channel
->type
;
574 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
578 EXPORT_SYMBOL_GPL(iio_get_channel_type
);