/*
 * drivers/iio/buffer_cb.c - Industrial I/O callback buffer
 *
 * Routes buffered IIO scan data from a set of consumer channels to a
 * caller-supplied callback function.
 */
1 #include <linux/kernel.h>
2 #include <linux/slab.h>
3 #include <linux/err.h>
4 #include <linux/export.h>
5 #include <linux/iio/buffer.h>
6 #include <linux/iio/consumer.h>
8 struct iio_cb_buffer {
9 struct iio_buffer buffer;
10 int (*cb)(u8 *data, void *private);
11 void *private;
12 struct iio_channel *channels;
15 static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
17 struct iio_cb_buffer *cb_buff = container_of(buffer,
18 struct iio_cb_buffer,
19 buffer);
21 return cb_buff->cb(data, cb_buff->private);
24 static struct iio_buffer_access_funcs iio_cb_access = {
25 .store_to = &iio_buffer_cb_store_to,
28 struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
29 int (*cb)(u8 *data,
30 void *private),
31 void *private)
33 int ret;
34 struct iio_cb_buffer *cb_buff;
35 struct iio_dev *indio_dev;
36 struct iio_channel *chan;
38 cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
39 if (cb_buff == NULL) {
40 ret = -ENOMEM;
41 goto error_ret;
44 iio_buffer_init(&cb_buff->buffer);
46 cb_buff->private = private;
47 cb_buff->cb = cb;
48 cb_buff->buffer.access = &iio_cb_access;
49 INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
51 cb_buff->channels = iio_channel_get_all(dev);
52 if (IS_ERR(cb_buff->channels)) {
53 ret = PTR_ERR(cb_buff->channels);
54 goto error_free_cb_buff;
57 indio_dev = cb_buff->channels[0].indio_dev;
58 cb_buff->buffer.scan_mask
59 = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
60 GFP_KERNEL);
61 if (cb_buff->buffer.scan_mask == NULL) {
62 ret = -ENOMEM;
63 goto error_release_channels;
65 chan = &cb_buff->channels[0];
66 while (chan->indio_dev) {
67 if (chan->indio_dev != indio_dev) {
68 ret = -EINVAL;
69 goto error_free_scan_mask;
71 set_bit(chan->channel->scan_index,
72 cb_buff->buffer.scan_mask);
73 chan++;
76 return cb_buff;
78 error_free_scan_mask:
79 kfree(cb_buff->buffer.scan_mask);
80 error_release_channels:
81 iio_channel_release_all(cb_buff->channels);
82 error_free_cb_buff:
83 kfree(cb_buff);
84 error_ret:
85 return ERR_PTR(ret);
87 EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
89 int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
91 return iio_update_buffers(cb_buff->channels[0].indio_dev,
92 &cb_buff->buffer,
93 NULL);
95 EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
97 void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
99 iio_update_buffers(cb_buff->channels[0].indio_dev,
100 NULL,
101 &cb_buff->buffer);
103 EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
105 void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
107 kfree(cb_buff->buffer.scan_mask);
108 iio_channel_release_all(cb_buff->channels);
109 kfree(cb_buff);
111 EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
113 struct iio_channel
114 *iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
116 return cb_buffer->channels;
118 EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);