sh_eth: fix EESIPR values for SH77{34|63}
[linux/fpc-iii.git] / drivers / iio / buffer / kfifo_buf.c
blobc5b999f0c51943f4ba79c066702cad96536310d4
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/sched.h>
#include <linux/poll.h>
/*
 * kfifo-backed implementation of an IIO buffer. The generic
 * struct iio_buffer is embedded so the core can be handed
 * &buf->buffer and we can recover our state via container_of().
 */
struct iio_kfifo {
	struct iio_buffer buffer;	/* generic IIO buffer state */
	struct kfifo kf;		/* backing FIFO storage */
	struct mutex user_lock;		/* serializes alloc/read/len accesses to kf */
	int update_needed;		/* kf must be (re)allocated on next request_update */
};

#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
21 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
22 int bytes_per_datum, int length)
24 if ((length == 0) || (bytes_per_datum == 0))
25 return -EINVAL;
27 return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
28 bytes_per_datum, GFP_KERNEL);
31 static int iio_request_update_kfifo(struct iio_buffer *r)
33 int ret = 0;
34 struct iio_kfifo *buf = iio_to_kfifo(r);
36 mutex_lock(&buf->user_lock);
37 if (buf->update_needed) {
38 kfifo_free(&buf->kf);
39 ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
40 buf->buffer.length);
41 if (ret >= 0)
42 buf->update_needed = false;
43 } else {
44 kfifo_reset_out(&buf->kf);
46 mutex_unlock(&buf->user_lock);
48 return ret;
51 static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
53 struct iio_kfifo *kf = iio_to_kfifo(r);
54 kf->update_needed = true;
55 return 0;
58 static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
60 if (r->bytes_per_datum != bpd) {
61 r->bytes_per_datum = bpd;
62 iio_mark_update_needed_kfifo(r);
64 return 0;
67 static int iio_set_length_kfifo(struct iio_buffer *r, int length)
69 /* Avoid an invalid state */
70 if (length < 2)
71 length = 2;
72 if (r->length != length) {
73 r->length = length;
74 iio_mark_update_needed_kfifo(r);
76 return 0;
79 static int iio_store_to_kfifo(struct iio_buffer *r,
80 const void *data)
82 int ret;
83 struct iio_kfifo *kf = iio_to_kfifo(r);
84 ret = kfifo_in(&kf->kf, data, 1);
85 if (ret != 1)
86 return -EBUSY;
87 return 0;
90 static int iio_read_first_n_kfifo(struct iio_buffer *r,
91 size_t n, char __user *buf)
93 int ret, copied;
94 struct iio_kfifo *kf = iio_to_kfifo(r);
96 if (mutex_lock_interruptible(&kf->user_lock))
97 return -ERESTARTSYS;
99 if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
100 ret = -EINVAL;
101 else
102 ret = kfifo_to_user(&kf->kf, buf, n, &copied);
103 mutex_unlock(&kf->user_lock);
104 if (ret < 0)
105 return ret;
107 return copied;
110 static size_t iio_kfifo_buf_data_available(struct iio_buffer *r)
112 struct iio_kfifo *kf = iio_to_kfifo(r);
113 size_t samples;
115 mutex_lock(&kf->user_lock);
116 samples = kfifo_len(&kf->kf);
117 mutex_unlock(&kf->user_lock);
119 return samples;
/*
 * Final teardown, invoked via the access funcs' .release callback:
 * destroy the lock, free the FIFO storage, then the container itself.
 */
static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
{
	struct iio_kfifo *kf = iio_to_kfifo(buffer);

	mutex_destroy(&kf->user_lock);
	kfifo_free(&kf->kf);
	kfree(kf);
}
131 static const struct iio_buffer_access_funcs kfifo_access_funcs = {
132 .store_to = &iio_store_to_kfifo,
133 .read_first_n = &iio_read_first_n_kfifo,
134 .data_available = iio_kfifo_buf_data_available,
135 .request_update = &iio_request_update_kfifo,
136 .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
137 .set_length = &iio_set_length_kfifo,
138 .release = &iio_kfifo_buffer_release,
140 .modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
143 struct iio_buffer *iio_kfifo_allocate(void)
145 struct iio_kfifo *kf;
147 kf = kzalloc(sizeof(*kf), GFP_KERNEL);
148 if (!kf)
149 return NULL;
151 kf->update_needed = true;
152 iio_buffer_init(&kf->buffer);
153 kf->buffer.access = &kfifo_access_funcs;
154 kf->buffer.length = 2;
155 mutex_init(&kf->user_lock);
157 return &kf->buffer;
159 EXPORT_SYMBOL(iio_kfifo_allocate);
/**
 * iio_kfifo_free - release a buffer obtained from iio_kfifo_allocate()
 * @r: buffer to release
 *
 * Drops a reference on the buffer; presumably the .release callback
 * (iio_kfifo_buffer_release) runs once the last reference is gone —
 * TODO confirm against iio_buffer_put() semantics.
 */
void iio_kfifo_free(struct iio_buffer *r)
{
	iio_buffer_put(r);
}
EXPORT_SYMBOL(iio_kfifo_free);
/* devres destructor: free the buffer pointer stored in the devres data. */
static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}
/* devres match: true when the stored buffer pointer equals @data. */
static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
{
	struct iio_buffer **bufp = res;

	if (WARN_ON(!bufp || !*bufp))
		return 0;

	return *bufp == data;
}
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
184 * @dev: Device to allocate kfifo buffer for
186 * RETURNS:
187 * Pointer to allocated iio_buffer on success, NULL on failure.
189 struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
191 struct iio_buffer **ptr, *r;
193 ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
194 if (!ptr)
195 return NULL;
197 r = iio_kfifo_allocate();
198 if (r) {
199 *ptr = r;
200 devres_add(dev, ptr);
201 } else {
202 devres_free(ptr);
205 return r;
207 EXPORT_SYMBOL(devm_iio_kfifo_allocate);
 * devm_iio_kfifo_free - Resource-managed iio_kfifo_free()
211 * @dev: Device the buffer belongs to
212 * @r: The buffer associated with the device
void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
{
	/* WARN if @r was never registered as a devres of @dev. */
	WARN_ON(devres_release(dev, devm_iio_kfifo_release,
			       devm_iio_kfifo_match, r));
}
EXPORT_SYMBOL(devm_iio_kfifo_free);
221 MODULE_LICENSE("GPL");