/*
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

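/*
 * Each subscription buffers its events in a small ring of v4l2_kevent
 * slots. sev_pos() maps a logical offset (0 == oldest queued event,
 * counted from sev->first) to an index into that ring, wrapping at
 * sev->elems.
 */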
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
        idx += sev->first;
        return idx >= sev->elems ? idx - sev->elems : idx;
}

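/*
 * Remove the oldest available event from the file handle's queue and
 * copy it to 'event'. Returns -ENOENT when nothing is pending; the
 * 'pending' count reported to userspace is the number of events still
 * queued after this one was taken.
 */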
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
        struct v4l2_kevent *kev;
        unsigned long flags;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        if (list_empty(&fh->available)) {
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                return -ENOENT;
        }

        WARN_ON(fh->navailable == 0);

        kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
        list_del(&kev->list);
        fh->navailable--;

        kev->event.pending = fh->navailable;
        *event = kev->event;
        kev->sev->first = sev_pos(kev->sev, 1);
        kev->sev->in_use--;

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        return 0;
}

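/*
 * Dequeue one event, optionally blocking until an event arrives. The
 * video_device lock (when the driver uses one) is released across the
 * wait so other file handles can keep issuing ioctls.
 */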
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
                       int nonblocking)
{
        int ret;

        if (nonblocking)
                return __v4l2_event_dequeue(fh, event);

        /* Release the vdev lock while waiting */
        if (fh->vdev->lock)
                mutex_unlock(fh->vdev->lock);

        do {
                ret = wait_event_interruptible(fh->wait,
                                               fh->navailable != 0);
                if (ret < 0)
                        break;

                ret = __v4l2_event_dequeue(fh, event);
        } while (ret == -ENOENT);

        if (fh->vdev->lock)
                mutex_lock(fh->vdev->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
                struct v4l2_fh *fh, u32 type, u32 id)
{
        struct v4l2_subscribed_event *sev;

        assert_spin_locked(&fh->vdev->fh_lock);

        list_for_each_entry(sev, &fh->subscribed, list)
                if (sev->type == type && sev->id == id)
                        return sev;

        return NULL;
}

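/*
 * Queue one event on a single file handle. When the subscription's
 * ring is full, the oldest event is dropped to make room: a single-slot
 * subscription may fold the old payload into the new event via the
 * replace op, and a larger ring may fold it into the second-oldest
 * event via the merge op, so drivers that provide those ops lose no
 * information. Caller must hold fh->vdev->fh_lock.
 */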
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
                const struct timespec *ts)
{
        struct v4l2_subscribed_event *sev;
        struct v4l2_kevent *kev;
        bool copy_payload = true;

        /* Are we subscribed? */
        sev = v4l2_event_subscribed(fh, ev->type, ev->id);
        if (sev == NULL)
                return;

        /*
         * If the event has been added to the fh->subscribed list, but its
         * add op has not completed yet, elems will be 0; treat this as
         * not being subscribed.
         */
        if (!sev->elems)
                return;

        /* Increase event sequence number on fh. */
        fh->sequence++;

        /* Do we have any free events? */
        if (sev->in_use == sev->elems) {
                /* no, remove the oldest one */
                kev = sev->events + sev_pos(sev, 0);
                list_del(&kev->list);
                sev->in_use--;
                sev->first = sev_pos(sev, 1);
                fh->navailable--;
                if (sev->elems == 1) {
                        if (sev->ops && sev->ops->replace) {
                                sev->ops->replace(&kev->event, ev);
                                copy_payload = false;
                        }
                } else if (sev->ops && sev->ops->merge) {
                        struct v4l2_kevent *second_oldest =
                                sev->events + sev_pos(sev, 0);
                        sev->ops->merge(&kev->event, &second_oldest->event);
                }
        }

        /* Take one and fill it. */
        kev = sev->events + sev_pos(sev, sev->in_use);
        kev->event.type = ev->type;
        if (copy_payload)
                kev->event.u = ev->u;
        kev->event.id = ev->id;
        kev->event.timestamp = *ts;
        kev->event.sequence = fh->sequence;
        sev->in_use++;
        list_add_tail(&kev->list, &fh->available);

        fh->navailable++;

        wake_up_all(&fh->wait);
}

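/* Queue an event on every file handle currently open on the device. */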
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
        struct v4l2_fh *fh;
        unsigned long flags;
        struct timespec timestamp;

        if (vdev == NULL)
                return;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&vdev->fh_lock, flags);

        list_for_each_entry(fh, &vdev->fh_list, list)
                __v4l2_event_queue_fh(fh, ev, &timestamp);

        spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
        unsigned long flags;
        struct timespec timestamp;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        __v4l2_event_queue_fh(fh, ev, &timestamp);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
        return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

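/*
 * Subscribe a file handle to events of the given type/id. 'elems' is
 * the number of events the subscription can buffer (0 is promoted to
 * 1). The subscription is published on fh->subscribed before the add
 * op runs; sev->elems stays 0 until setup completes, which makes
 * __v4l2_event_queue_fh() ignore the entry in the meantime.
 *
 * A minimal driver-side sketch (hypothetical handler name, buffering
 * up to four events with no ops):
 *
 *      static int my_subscribe_event(struct v4l2_fh *fh,
 *                      const struct v4l2_event_subscription *sub)
 *      {
 *              return v4l2_event_subscribe(fh, sub, 4, NULL);
 *      }
 */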
int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
{
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;

        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;

        if (elems < 1)
                elems = 1;

        sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
        sev->id = sub->id;
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (!found_ev)
                list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (found_ev) {
                kfree(sev);
                return 0; /* Already listening */
        }

        if (sev->ops && sev->ops->add) {
                int ret = sev->ops->add(sev, elems);
                if (ret) {
                        sev->ops = NULL;
                        v4l2_event_unsubscribe(fh, sub);
                        return ret;
                }
        }

        /* Mark as ready for use */
        sev->elems = elems;

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
        struct v4l2_event_subscription sub;
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        do {
                sev = NULL;

                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                if (!list_empty(&fh->subscribed)) {
                        sev = list_first_entry(&fh->subscribed,
                                        struct v4l2_subscribed_event, list);
                        sub.type = sev->type;
                        sub.id = sev->id;
                }
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                if (sev)
                        v4l2_event_unsubscribe(fh, &sub);
        } while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

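/*
 * Drop one subscription together with any of its events still sitting
 * on the file handle's available list. V4L2_EVENT_ALL tears down every
 * subscription on the handle.
 */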
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                           const struct v4l2_event_subscription *sub)
{
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
        int i;

        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
                return 0;
        }

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (sev != NULL) {
                /* Remove any pending events for this subscription */
                for (i = 0; i < sev->in_use; i++) {
                        list_del(&sev->events[sev_pos(sev, i)].list);
                        fh->navailable--;
                }
                list_del(&sev->list);
        }

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);

        kfree(sev);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

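/*
 * Source-change events carry a bitmask of changes. When a queued event
 * has to be replaced or merged, the old and new bitmasks are OR'ed
 * together so no change flag is ever dropped.
 */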
static void v4l2_event_src_replace(struct v4l2_event *old,
                                   const struct v4l2_event *new)
{
        u32 old_changes = old->u.src_change.changes;

        old->u.src_change = new->u.src_change;
        old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
                                 struct v4l2_event *new)
{
        new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
        .replace = v4l2_event_src_replace,
        .merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
                                    const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
                struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);