/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
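/*
 * Each subscription keeps its pending events in a fixed-size ring of
 * sev->elems slots: sev->first indexes the oldest queued event and
 * sev->in_use counts the occupied slots.  sev_pos() below maps a
 * logical offset from the oldest event to a physical slot index,
 * wrapping around the end of the array.
 */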
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
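/*
 * Userspace reaches v4l2_event_dequeue() through the VIDIOC_DQEVENT
 * ioctl.  A minimal (illustrative) caller, assuming fd is open on the
 * video node and an event has been subscribed:
 *
 *	struct v4l2_event ev;
 *
 *	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
 *		printf("event type %u, sequence %u\n", ev.type, ev.sequence);
 */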
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
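/*
 * Queue an event on a single file handle.  If the subscription's ring
 * is already full, the oldest event is dropped to make room; the
 * subscription's replace/merge ops, when present, fold the dropped
 * payload into the surviving events so information is not silently
 * lost.
 */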
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
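/*
 * Drivers raise events with v4l2_event_queue().  For example, a driver
 * might signal end-of-stream to every file handle like this
 * (illustrative sketch):
 *
 *	static const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
 *
 *	v4l2_event_queue(vdev, &ev);
 */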
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
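/*
 * Typically used from a driver's .poll handler to report exceptional
 * readiness, e.g. (illustrative):
 *
 *	if (v4l2_event_pending(fh))
 *		res |= POLLPRI;
 */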
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems,
		       GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kvfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
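/*
 * A driver normally wires this up from its VIDIOC_SUBSCRIBE_EVENT
 * handler.  Illustrative sketch (handler name and event choice are
 * hypothetical):
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 2, NULL);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */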
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}
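/*
 * Together these ops ensure that when source-change events overflow
 * the ring, no information is lost: the 'changes' bitmask of a
 * discarded event is always ORed into the one that survives.
 */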
static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);