drivers/iio/industrialio-trigger.c
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>
/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */
static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);
/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t iio_trigger_read_name(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct iio_trigger *trig = to_iio_trigger(dev);
        return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
static struct attribute *iio_trig_dev_attrs[] = {
        &dev_attr_name.attr,
        NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
int iio_trigger_register(struct iio_trigger *trig_info)
{
        int ret;

        /* trig_info->ops is required for the module member */
        if (!trig_info->ops)
                return -EINVAL;

        trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
        if (trig_info->id < 0)
                return trig_info->id;

        /* Set the name used for the sysfs directory etc */
        dev_set_name(&trig_info->dev, "trigger%ld",
                     (unsigned long) trig_info->id);

        ret = device_add(&trig_info->dev);
        if (ret)
                goto error_unregister_id;

        /* Add to list of available triggers held by the IIO core */
        mutex_lock(&iio_trigger_list_lock);
        if (__iio_trigger_find_by_name(trig_info->name)) {
                pr_err("Duplicate trigger name '%s'\n", trig_info->name);
                ret = -EEXIST;
                goto error_device_del;
        }
        list_add_tail(&trig_info->list, &iio_trigger_list);
        mutex_unlock(&iio_trigger_list_lock);

        return 0;

error_device_del:
        mutex_unlock(&iio_trigger_list_lock);
        device_del(&trig_info->dev);
error_unregister_id:
        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
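
/*
 * A trigger provider typically allocates a trigger, fills in its ops and
 * then registers it so consumers can select it by name through sysfs.
 * Roughly (illustrative sketch only; my_trigger_ops and my_state are
 * hypothetical driver-side names):
 *
 *	trig = iio_trigger_alloc("%s-dev%d", indio_dev->name, indio_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(trig, my_state);
 *	ret = iio_trigger_register(trig);
 */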
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
        mutex_lock(&iio_trigger_list_lock);
        list_del(&trig_info->list);
        mutex_unlock(&iio_trigger_list_lock);

        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        /* Possible issue in here */
        device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
        struct iio_trigger *iter;

        list_for_each_entry(iter, &iio_trigger_list, list)
                if (!strcmp(iter->name, name))
                        return iter;

        return NULL;
}
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
                                                    size_t len)
{
        struct iio_trigger *trig = NULL, *iter;

        mutex_lock(&iio_trigger_list_lock);
        list_for_each_entry(iter, &iio_trigger_list, list)
                if (sysfs_streq(iter->name, name)) {
                        trig = iter;
                        break;
                }
        mutex_unlock(&iio_trigger_list_lock);

        return trig;
}
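
/*
 * Called by a trigger driver from the interrupt (or other event) that acts
 * as the trigger.  If no previous poll is still in flight, use_count is
 * primed to the number of consumer slots and each enabled sub-irq is fired;
 * unused slots are immediately completed via iio_trigger_notify_done().
 */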
void iio_trigger_poll(struct iio_trigger *trig)
{
        int i;

        if (!atomic_read(&trig->use_count)) {
                atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        if (trig->subirqs[i].enabled)
                                generic_handle_irq(trig->subirq_base + i);
                        else
                                iio_trigger_notify_done(trig);
                }
        }
}
EXPORT_SYMBOL(iio_trigger_poll);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
        iio_trigger_poll(private);
        return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
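
/*
 * Variant of iio_trigger_poll() for use from a thread (for example a
 * nested/threaded interrupt handler): the sub-irqs are delivered with
 * handle_nested_irq() so the consumer handlers run in process context.
 */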
void iio_trigger_poll_chained(struct iio_trigger *trig)
{
        int i;

        if (!atomic_read(&trig->use_count)) {
                atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        if (trig->subirqs[i].enabled)
                                handle_nested_irq(trig->subirq_base + i);
                        else
                                iio_trigger_notify_done(trig);
                }
        }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
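
/*
 * Each consumer calls iio_trigger_notify_done() when it has finished
 * handling the current event.  Once the last consumer drops use_count to
 * zero the trigger gets a chance to re-arm via try_reenable(); a non-zero
 * return there means an event was missed, so a new poll is started.
 */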
void iio_trigger_notify_done(struct iio_trigger *trig)
{
        if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */
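
/*
 * Each trigger owns a small pool of CONFIG_IIO_CONSUMERS_PER_TRIGGER irq
 * descriptors starting at subirq_base.  Attaching a consumer reserves one
 * bit in the pool and the matching virtual irq; detaching releases it.
 */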
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
        int ret;

        mutex_lock(&trig->pool_lock);
        ret = bitmap_find_free_region(trig->pool,
                                      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                      ilog2(1));
        mutex_unlock(&trig->pool_lock);
        if (ret >= 0)
                ret += trig->subirq_base;

        return ret;
}
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
        mutex_lock(&trig->pool_lock);
        clear_bit(irq - trig->subirq_base, trig->pool);
        mutex_unlock(&trig->pool_lock);
}
/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. Alternative of not enabling trigger unless
 * the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
                                        struct iio_poll_func *pf)
{
        int ret = 0;
        bool notinuse
                = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->info->driver_module);

        /* Get irq number */
        pf->irq = iio_trigger_get_irq(trig);
        if (pf->irq < 0) {
                /* Propagate the error rather than silently returning 0 */
                ret = pf->irq;
                goto out_put_module;
        }

        /* Request irq */
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
        if (ret < 0)
                goto out_put_irq;

        /* Enable trigger in driver */
        if (trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
                        goto out_free_irq;
        }

        return ret;

out_free_irq:
        free_irq(pf->irq, pf);
out_put_irq:
        iio_trigger_put_irq(trig, pf->irq);
out_put_module:
        module_put(pf->indio_dev->info->driver_module);
        return ret;
}
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
                                        struct iio_poll_func *pf)
{
        int ret = 0;
        bool no_other_users
                = (bitmap_weight(trig->pool,
                                 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
                   == 1);
        if (trig->ops->set_trigger_state && no_other_users) {
                ret = trig->ops->set_trigger_state(trig, false);
                if (ret)
                        return ret;
        }
        iio_trigger_put_irq(trig, pf->irq);
        free_irq(pf->irq, pf);
        module_put(pf->indio_dev->info->driver_module);

        return ret;
}
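
/*
 * Default "top half" for triggered buffers: record the timestamp as close
 * to the trigger event as possible, then wake the threaded handler that
 * actually reads and pushes the data.
 */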
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        pf->timestamp = iio_get_time_ns(pf->indio_dev);
        return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
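
/*
 * A buffered driver normally lets iio_triggered_buffer_setup() build its
 * pollfunc, but open-coding the allocation looks roughly like the sketch
 * below (my_trigger_handler is a hypothetical bottom half):
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */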
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
                    irqreturn_t (*thread)(int irq, void *p),
                    int type,
                    struct iio_dev *indio_dev,
                    const char *fmt,
                    ...)
{
        va_list vargs;
        struct iio_poll_func *pf;

        pf = kmalloc(sizeof *pf, GFP_KERNEL);
        if (pf == NULL)
                return NULL;
        va_start(vargs, fmt);
        pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
        va_end(vargs);
        if (pf->name == NULL) {
                kfree(pf);
                return NULL;
        }
        pf->h = h;
        pf->thread = thread;
        pf->type = type;
        pf->indio_dev = indio_dev;

        return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
        kfree(pf->name);
        kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t iio_trigger_read_current(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        if (indio_dev->trig)
                return sprintf(buf, "%s\n", indio_dev->trig->name);
        return 0;
}
/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t iio_trigger_write_current(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_trigger *oldtrig = indio_dev->trig;
        struct iio_trigger *trig;
        int ret;

        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
                mutex_unlock(&indio_dev->mlock);
                return -EBUSY;
        }
        mutex_unlock(&indio_dev->mlock);

        trig = iio_trigger_find_by_name(buf, len);
        if (oldtrig == trig)
                return len;

        if (trig && indio_dev->info->validate_trigger) {
                ret = indio_dev->info->validate_trigger(indio_dev, trig);
                if (ret)
                        return ret;
        }

        if (trig && trig->ops->validate_device) {
                ret = trig->ops->validate_device(trig, indio_dev);
                if (ret)
                        return ret;
        }

        indio_dev->trig = trig;

        if (oldtrig) {
                if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
                        iio_trigger_detach_poll_func(oldtrig,
                                                     indio_dev->pollfunc_event);
                iio_trigger_put(oldtrig);
        }
        if (indio_dev->trig) {
                iio_trigger_get(indio_dev->trig);
                if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
                        iio_trigger_attach_poll_func(indio_dev->trig,
                                                     indio_dev->pollfunc_event);
        }

        return len;
}
static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
                   iio_trigger_read_current,
                   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
        &dev_attr_current_trigger.attr,
        NULL,
};
static const struct attribute_group iio_trigger_consumer_attr_group = {
        .name = "trigger",
        .attrs = iio_trigger_consumer_attrs,
};
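
/*
 * Release callback for the trigger's struct device: runs once the last
 * reference is dropped, returning the sub-irq descriptors and freeing the
 * name string and the trigger itself.
 */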
static void iio_trig_release(struct device *device)
{
        struct iio_trigger *trig = to_iio_trigger(device);
        int i;

        if (trig->subirq_base) {
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        irq_modify_status(trig->subirq_base + i,
                                          IRQ_NOAUTOEN,
                                          IRQ_NOREQUEST | IRQ_NOPROBE);
                        irq_set_chip(trig->subirq_base + i,
                                     NULL);
                        irq_set_handler(trig->subirq_base + i,
                                        NULL);
                }

                irq_free_descs(trig->subirq_base,
                               CONFIG_IIO_CONSUMERS_PER_TRIGGER);
        }
        kfree(trig->name);
        kfree(trig);
}
static struct device_type iio_trig_type = {
        .release = iio_trig_release,
        .groups = iio_trig_dev_groups,
};
static void iio_trig_subirqmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}
static void iio_trig_subirqunmask(struct irq_data *d)
{
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct iio_trigger *trig
                = container_of(chip,
                               struct iio_trigger, subirq_chip);
        trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
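
/*
 * Core allocation helper: sets up the embedded struct device, reserves the
 * block of sub-irq descriptors and wires each one to the per-trigger
 * irq_chip whose mask/unmask callbacks simply track the 'enabled' flag
 * consulted by iio_trigger_poll().
 */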
static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
        struct iio_trigger *trig;
        trig = kzalloc(sizeof *trig, GFP_KERNEL);
        if (trig) {
                int i;
                trig->dev.type = &iio_trig_type;
                trig->dev.bus = &iio_bus_type;
                device_initialize(&trig->dev);

                mutex_init(&trig->pool_lock);
                trig->subirq_base
                        = irq_alloc_descs(-1, 0,
                                          CONFIG_IIO_CONSUMERS_PER_TRIGGER,
                                          0);
                if (trig->subirq_base < 0) {
                        kfree(trig);
                        return NULL;
                }

                trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
                if (trig->name == NULL) {
                        irq_free_descs(trig->subirq_base,
                                       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
                        kfree(trig);
                        return NULL;
                }
                trig->subirq_chip.name = trig->name;
                trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
                trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
                for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
                        irq_set_chip(trig->subirq_base + i,
                                     &trig->subirq_chip);
                        irq_set_handler(trig->subirq_base + i,
                                        &handle_simple_irq);
                        irq_modify_status(trig->subirq_base + i,
                                          IRQ_NOREQUEST | IRQ_NOAUTOEN,
                                          IRQ_NOPROBE);
                }
                get_device(&trig->dev);
        }

        return trig;
}
struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
        struct iio_trigger *trig;
        va_list vargs;

        va_start(vargs, fmt);
        trig = viio_trigger_alloc(fmt, vargs);
        va_end(vargs);

        return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);
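
/*
 * iio_trigger_free() drops a reference on the embedded struct device; the
 * memory is actually released from iio_trig_release() once the last
 * reference (including any taken by consumers via iio_trigger_get()) is
 * gone.
 */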
void iio_trigger_free(struct iio_trigger *trig)
{
        if (trig)
                put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
static void devm_iio_trigger_release(struct device *dev, void *res)
{
        iio_trigger_free(*(struct iio_trigger **)res);
}
static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
        struct iio_trigger **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        return *r == data;
}
/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
                                           const char *fmt, ...)
{
        struct iio_trigger **ptr, *trig;
        va_list vargs;

        ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return NULL;

        /* use raw alloc_dr for kmalloc caller tracing */
        va_start(vargs, fmt);
        trig = viio_trigger_alloc(fmt, vargs);
        va_end(vargs);
        if (trig) {
                *ptr = trig;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_trig:	the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
        int rc;

        rc = devres_release(dev, devm_iio_trigger_release,
                            devm_iio_trigger_match, iio_trig);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
        indio_dev->groups[indio_dev->groupcounter++] =
                &iio_trigger_consumer_attr_group;
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
        /* Clean up an associated but not attached trigger reference */
        if (indio_dev->trig)
                iio_trigger_put(indio_dev->trig);
}
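
/*
 * Buffer setup_ops used by triggered-buffer drivers: attach the device's
 * pollfunc to the currently selected trigger when the buffer is enabled and
 * detach it again on disable, so trigger events only reach an active buffer.
 */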
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
        return iio_trigger_attach_poll_func(indio_dev->trig,
                                            indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
        return iio_trigger_detach_poll_func(indio_dev->trig,
                                            indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);