/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
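
/*
 * Illustrative example of the offset scheme (the values are made up): if vb2
 * assigns mem_offset 0x1000 to an MMAP buffer, userspace sees 0x1000 for the
 * OUTPUT (source) queue but 0x40001000 (0x1000 + DST_QUEUE_OFF_BASE) for the
 * CAPTURE (destination) queue. v4l2_m2m_querybuf() adds the base and
 * v4l2_m2m_mmap() strips it again before calling vb2_mmap() on the right
 * queue.
 */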

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is no
	   longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues (see the illustrative
 * wiring sketch after these helpers).
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);