drivers/iio/buffer/industrialio-buffer-cb.c

/* The industrial I/O callback buffer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/consumer.h>

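/*
 * struct iio_cb_buffer - wrap an IIO buffer so that pushed data is delivered
 *                        to a consumer-supplied callback
 * @buffer:    embedded IIO buffer
 * @cb:        callback run for each datum pushed into the buffer
 * @private:   opaque pointer handed back to @cb
 * @channels:  channel array obtained from iio_channel_get_all(), terminated
 *             by an entry whose indio_dev is NULL
 * @indio_dev: the single IIO device that all of the channels belong to
 */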
struct iio_cb_buffer {
        struct iio_buffer buffer;
        int (*cb)(const void *data, void *private);
        void *private;
        struct iio_channel *channels;
        struct iio_dev *indio_dev;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
        return container_of(buffer, struct iio_cb_buffer, buffer);
}

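/* Hand each datum pushed into the buffer straight to the consumer callback. */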
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
        struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
        return cb_buff->cb(data, cb_buff->private);
}

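/* Buffer .release callback: runs once the last reference to the buffer is dropped. */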
static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
        struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
        kfree(cb_buff->buffer.scan_mask);
        kfree(cb_buff);
}

static const struct iio_buffer_access_funcs iio_cb_access = {
        .store_to = &iio_buffer_cb_store_to,
        .release = &iio_buffer_cb_release,

        .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};

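/*
 * iio_channel_get_all_cb() - allocate a callback buffer covering every channel
 *                            mapped to the consumer device @dev
 *
 * Grabs all channels mapped to @dev, checks that they belong to the same IIO
 * device and builds a scan mask with each channel's scan index set.  Returns
 * an ERR_PTR() on failure.
 */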
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                                             int (*cb)(const void *data,
                                                       void *private),
                                             void *private)
{
        int ret;
        struct iio_cb_buffer *cb_buff;
        struct iio_channel *chan;

        cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
        if (cb_buff == NULL)
                return ERR_PTR(-ENOMEM);

        iio_buffer_init(&cb_buff->buffer);

        cb_buff->private = private;
        cb_buff->cb = cb;
        cb_buff->buffer.access = &iio_cb_access;
        INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

        cb_buff->channels = iio_channel_get_all(dev);
        if (IS_ERR(cb_buff->channels)) {
                ret = PTR_ERR(cb_buff->channels);
                goto error_free_cb_buff;
        }

        cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
        cb_buff->buffer.scan_mask
                = kcalloc(BITS_TO_LONGS(cb_buff->indio_dev->masklength),
                          sizeof(long), GFP_KERNEL);
        if (cb_buff->buffer.scan_mask == NULL) {
                ret = -ENOMEM;
                goto error_release_channels;
        }
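        /* All channels must share one IIO device; mark each in the scan mask. */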
        chan = &cb_buff->channels[0];
        while (chan->indio_dev) {
                if (chan->indio_dev != cb_buff->indio_dev) {
                        ret = -EINVAL;
                        goto error_free_scan_mask;
                }
                set_bit(chan->channel->scan_index,
                        cb_buff->buffer.scan_mask);
                chan++;
        }

        return cb_buff;

error_free_scan_mask:
        kfree(cb_buff->buffer.scan_mask);
error_release_channels:
        iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
        kfree(cb_buff);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);

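/* Attach the callback buffer to its IIO device, starting data capture. */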
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
        return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
                                  NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);

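/* Detach the callback buffer, stopping delivery of data to the callback. */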
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_update_buffers(cb_buff->indio_dev, NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);

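/*
 * Release the channel map and drop the buffer reference; the wrapper itself
 * is freed through iio_buffer_cb_release() once the last reference goes away.
 */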
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
        iio_channel_release_all(cb_buff->channels);
        iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);

struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
        return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);

struct iio_dev
*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer)
{
        return cb_buffer->indio_dev;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_iio_dev);

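/*
 * Usage sketch for the API above (a hypothetical consumer driver; the names
 * my_handle_scan, my_state, my_consume, st and parent are illustrative only
 * and not part of this file).  The callback receives one scan worth of data
 * per invocation:
 *
 *      static int my_handle_scan(const void *data, void *private)
 *      {
 *              struct my_state *st = private;
 *
 *              return my_consume(st, data);
 *      }
 *
 *      cb_buff = iio_channel_get_all_cb(parent, my_handle_scan, st);
 *      if (IS_ERR(cb_buff))
 *              return PTR_ERR(cb_buff);
 *      ret = iio_channel_start_all_cb(cb_buff);
 *      ...
 *      iio_channel_stop_all_cb(cb_buff);
 *      iio_channel_release_all_cb(cb_buff);
 */
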
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O callback buffer");
MODULE_LICENSE("GPL");