drivers/media/v4l2-core/v4l2-event.c

/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

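/*
 * The events for one subscription live in a fixed-size ring buffer of
 * sev->elems slots; sev->first indexes the oldest queued event.  sev_pos()
 * turns a logical offset into a slot number, wrapping at the end of the
 * ring.  For example, with elems == 4 and first == 3, sev_pos(sev, 2)
 * returns (3 + 2) - 4 == 1.
 */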
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
        idx += sev->first;
        return idx >= sev->elems ? idx - sev->elems : idx;
}

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
        struct v4l2_kevent *kev;
        unsigned long flags;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        if (list_empty(&fh->available)) {
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                return -ENOENT;
        }

        WARN_ON(fh->navailable == 0);

        kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
        list_del(&kev->list);
        fh->navailable--;

        kev->event.pending = fh->navailable;
        *event = kev->event;
        kev->sev->first = sev_pos(kev->sev, 1);
        kev->sev->in_use--;

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        return 0;
}

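/*
 * v4l2_event_dequeue - dequeue one event, optionally blocking until one
 * arrives.  This is what backs the VIDIOC_DQEVENT ioctl.  Note that the
 * serialization lock of the video device, if any, is dropped while
 * sleeping, so other ioctls on the same device are not blocked by a
 * waiting reader.
 */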
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
                       int nonblocking)
{
        int ret;

        if (nonblocking)
                return __v4l2_event_dequeue(fh, event);

        /* Release the vdev lock while waiting */
        if (fh->vdev->lock)
                mutex_unlock(fh->vdev->lock);

        do {
                ret = wait_event_interruptible(fh->wait,
                                               fh->navailable != 0);
                if (ret < 0)
                        break;

                ret = __v4l2_event_dequeue(fh, event);
        } while (ret == -ENOENT);

        if (fh->vdev->lock)
                mutex_lock(fh->vdev->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

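/*
 * Look up the subscription on @fh matching @type and @id, if any.
 * Subscriptions are kept in a plain linked list, since a file handle
 * normally subscribes to only a handful of event types.
 */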
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
                struct v4l2_fh *fh, u32 type, u32 id)
{
        struct v4l2_subscribed_event *sev;

        assert_spin_locked(&fh->vdev->fh_lock);

        list_for_each_entry(sev, &fh->subscribed, list)
                if (sev->type == type && sev->id == id)
                        return sev;

        return NULL;
}

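/*
 * Queue one event on a single file handle.  If the subscription's ring is
 * already full, the oldest event is dropped to make room.  Two optional
 * hooks let a subscription preserve information that would otherwise be
 * lost: with a single-element ring, ->replace() folds the old payload
 * into the new event; with a larger ring, ->merge() folds the dropped
 * event into the now-oldest one.
 */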
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
                const struct timespec *ts)
{
        struct v4l2_subscribed_event *sev;
        struct v4l2_kevent *kev;
        bool copy_payload = true;

        /* Are we subscribed? */
        sev = v4l2_event_subscribed(fh, ev->type, ev->id);
        if (sev == NULL)
                return;

        /* Increase event sequence number on fh. */
        fh->sequence++;

        /* Do we have any free events? */
        if (sev->in_use == sev->elems) {
                /* no, remove the oldest one */
                kev = sev->events + sev_pos(sev, 0);
                list_del(&kev->list);
                sev->in_use--;
                sev->first = sev_pos(sev, 1);
                fh->navailable--;
                if (sev->elems == 1) {
                        if (sev->ops && sev->ops->replace) {
                                sev->ops->replace(&kev->event, ev);
                                copy_payload = false;
                        }
                } else if (sev->ops && sev->ops->merge) {
                        struct v4l2_kevent *second_oldest =
                                sev->events + sev_pos(sev, 0);
                        sev->ops->merge(&kev->event, &second_oldest->event);
                }
        }

        /* Take one and fill it. */
        kev = sev->events + sev_pos(sev, sev->in_use);
        kev->event.type = ev->type;
        if (copy_payload)
                kev->event.u = ev->u;
        kev->event.id = ev->id;
        kev->event.timestamp = *ts;
        kev->event.sequence = fh->sequence;
        sev->in_use++;
        list_add_tail(&kev->list, &fh->available);

        fh->navailable++;

        wake_up_all(&fh->wait);
}

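/*
 * v4l2_event_queue - broadcast an event to every file handle open on the
 * video device.  As a minimal sketch (not taken from this file), a driver
 * that detected a resolution change on its input could signal it to all
 * listeners like this, using names from the V4L2 UAPI:
 *
 *      struct v4l2_event ev = {
 *              .type = V4L2_EVENT_SOURCE_CHANGE,
 *              .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *      };
 *
 *      v4l2_event_queue(vdev, &ev);
 */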
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
        struct v4l2_fh *fh;
        unsigned long flags;
        struct timespec timestamp;

        if (vdev == NULL)
                return;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&vdev->fh_lock, flags);

        list_for_each_entry(fh, &vdev->fh_list, list)
                __v4l2_event_queue_fh(fh, ev, &timestamp);

        spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

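/*
 * v4l2_event_queue_fh - queue an event on a single file handle only.
 * Useful when the event concerns one context rather than the whole
 * device; mem2mem drivers, for instance, commonly deliver V4L2_EVENT_EOS
 * this way to the file handle that owns the job.
 */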
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
        unsigned long flags;
        struct timespec timestamp;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        __v4l2_event_queue_fh(fh, ev, &timestamp);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

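/*
 * v4l2_event_pending - return the number of events ready for dequeueing.
 * Drivers normally consult this from their poll() handler; a common
 * pattern (a sketch, using the classic poll flag names) is:
 *
 *      if (v4l2_event_pending(fh))
 *              mask |= POLLPRI;
 */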
int v4l2_event_pending(struct v4l2_fh *fh)
{
        return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

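/*
 * v4l2_event_subscribe - add a subscription for @sub->type/@sub->id on
 * @fh, with room for @elems queued events (at least one) and optional
 * @ops callbacks.  A driver's .subscribe_event ioctl op is the usual
 * caller; an illustrative sketch (my_subscribe_event is hypothetical,
 * v4l2_ctrl_subscribe_event comes from the control framework):
 *
 *      static int my_subscribe_event(struct v4l2_fh *fh,
 *                                    const struct v4l2_event_subscription *sub)
 *      {
 *              switch (sub->type) {
 *              case V4L2_EVENT_CTRL:
 *                      return v4l2_ctrl_subscribe_event(fh, sub);
 *              default:
 *                      return v4l2_event_subscribe(fh, sub, 4, NULL);
 *              }
 *      }
 */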
int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
{
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
        int ret = 0;

        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;

        if (elems < 1)
                elems = 1;

        sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
        sev->id = sub->id;
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
        sev->elems = elems;

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (found_ev) {
                /* Already listening */
                kvfree(sev);
                goto out_unlock;
        }

        if (sev->ops && sev->ops->add) {
                ret = sev->ops->add(sev, elems);
                if (ret) {
                        kvfree(sev);
                        goto out_unlock;
                }
        }

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

out_unlock:
        mutex_unlock(&fh->subscribe_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

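/*
 * Unsubscribe everything on @fh, one subscription at a time.  The loop
 * only peeks at the list head under the spinlock and then drops it,
 * because v4l2_event_unsubscribe() takes the lock itself and may end up
 * in ops->del(), which is allowed to sleep.
 */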
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
        struct v4l2_event_subscription sub;
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        do {
                sev = NULL;

                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                if (!list_empty(&fh->subscribed)) {
                        sev = list_first_entry(&fh->subscribed,
                                        struct v4l2_subscribed_event, list);
                        sub.type = sev->type;
                        sub.id = sev->id;
                }
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                if (sev)
                        v4l2_event_unsubscribe(fh, &sub);
        } while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                           const struct v4l2_event_subscription *sub)
{
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
        int i;

        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
                return 0;
        }

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (sev != NULL) {
                /* Remove any pending events for this subscription */
                for (i = 0; i < sev->in_use; i++) {
                        list_del(&sev->events[sev_pos(sev, i)].list);
                        fh->navailable--;
                }
                list_del(&sev->list);
        }

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);

        mutex_unlock(&fh->subscribe_lock);

        kvfree(sev);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

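/*
 * Subdev wrapper: this can be plugged straight into the .unsubscribe_event
 * op of struct v4l2_subdev_core_ops; the subdev pointer itself is unused.
 */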
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

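/*
 * Replace/merge callbacks for V4L2_EVENT_SOURCE_CHANGE.  The payload is a
 * bitmask of changes, so when an event has to be dropped the bits are
 * OR'ed into the surviving event and no change notification is lost.
 * E.g. if a pending event carries V4L2_EVENT_SRC_CH_RESOLUTION and a new
 * source change arrives while the one-element ring is full, the replaced
 * event still reports the resolution change alongside the new bits.
 */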
static void v4l2_event_src_replace(struct v4l2_event *old,
                                   const struct v4l2_event *new)
{
        u32 old_changes = old->u.src_change.changes;

        old->u.src_change = new->u.src_change;
        old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
                                 struct v4l2_event *new)
{
        new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
        .replace = v4l2_event_src_replace,
        .merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
                                    const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

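/*
 * Subdev wrapper for the above, suitable as the .subscribe_event core op.
 * Note that v4l2_src_change_event_subscribe() passes elems == 0, which
 * v4l2_event_subscribe() bumps to 1: the single slot is enough precisely
 * because the replace/merge ops keep the changes bitmask intact.
 */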
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
                struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);