/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

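/*
 * A minimal sketch of how a driver's device_run() callback might pick up
 * the next ready buffers. The "my_*" names are hypothetical driver code,
 * not framework API, and the context is assumed to embed a struct v4l2_fh;
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() are the wrappers from
 * v4l2-mem2mem.h around v4l2_m2m_next_buf():
 *
 *      static void my_device_run(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *              my_hw_start(ctx, src, dst);
 *      }
 */
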
/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:    m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; a sketch of such a callback follows the
 * function below.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Aborted context\n");
                return;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                       flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}

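/*
 * A minimal sketch of the custom job_ready callback mentioned above,
 * assuming a hypothetical driver context ("my_ctx") that embeds a
 * struct v4l2_fh and needs two source buffers per transaction;
 * v4l2_m2m_num_src_bufs_ready() is the counter helper from
 * v4l2-mem2mem.h:
 *
 *      static int my_job_ready(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *
 *              return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
 *      }
 */
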
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                           !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

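/*
 * A minimal sketch of the completion path in a hypothetical driver's
 * interrupt handler ("my_*" names are assumptions): the finished buffers
 * are taken off the ready lists, returned to videobuf2, and only then is
 * the device yielded back to the framework:
 *
 *      static irqreturn_t my_irq_handler(int irq, void *priv)
 *      {
 *              struct my_dev *dev = priv;
 *              struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *              vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *              return IRQ_HANDLED;
 *      }
 */
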
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
        else if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        if (list_empty(&dst_q->done_list))
                poll_wait(file, &dst_q->done_wq, wait);

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
        else if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

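/*
 * A worked example of the offset scheme above, with hypothetical numbers:
 * a CAPTURE buffer that vb2 itself placed at offset 0x1000 is reported to
 * the application as 0x1000 + DST_QUEUE_OFF_BASE = 0x40001000 by
 * v4l2_m2m_querybuf(); when that offset comes back through mmap(),
 * v4l2_m2m_mmap() sees it is above DST_QUEUE_OFF_BASE, selects the
 * destination queue and subtracts the base again before calling vb2_mmap().
 */
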
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

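/*
 * A minimal sketch of probe-time setup in a hypothetical driver;
 * device_run and job_abort are the two callbacks v4l2_m2m_init() insists
 * on, job_ready is optional:
 *
 *      static const struct v4l2_m2m_ops my_m2m_ops = {
 *              .device_run     = my_device_run,
 *              .job_ready      = my_job_ready,
 *              .job_abort      = my_job_abort,
 *      };
 *
 * and then, in the driver's probe():
 *
 *      dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */
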
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev - a previously initialized m2m_dev struct
 * @drv_priv - driver's instance private data
 * @queue_init - a callback for queue type-specific initialization function
 * to be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

        if (ret)
                goto err;
        /*
         * If both queues use same mutex assign it as the common buffer
         * queues lock to the m2m context. This lock is used in the
         * v4l2_m2m_ioctl_* helpers.
         */
        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
                m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

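/*
 * A minimal sketch of open-time setup in a hypothetical driver whose
 * context embeds a struct v4l2_fh; the queue_init callback is expected
 * to fill in both vb2_queues and call vb2_queue_init() on each:
 *
 *      static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *                               struct vb2_queue *dst_vq)
 *      {
 *              int ret;
 *
 *              (fill in src_vq: type, io_modes, ops, mem_ops, lock, ...)
 *              ret = vb2_queue_init(src_vq);
 *              if (ret)
 *                      return ret;
 *              (fill in dst_vq likewise)
 *              return vb2_queue_init(dst_vq);
 *      }
 *
 * and then, in the driver's open():
 *
 *      ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *      if (IS_ERR(ctx->fh.m2m_ctx))
 *              return PTR_ERR(ctx->fh.m2m_ctx);
 */
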
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the buf_queue() callback in struct vb2_ops.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

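/*
 * A minimal sketch of the corresponding vb2_ops hook in a hypothetical
 * driver ("my_*" names are assumptions):
 *
 *      static void my_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 *      }
 */
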
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

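/*
 * A hypothetical driver can wire the helpers above directly into its
 * v4l2_ioctl_ops, provided both of its queues share the same lock (see
 * the note on the v4l2_file_operations helpers below):
 *
 *      static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *              ...
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */
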
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for both the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        int ret;

        if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
                return -ERESTARTSYS;

        ret = v4l2_m2m_mmap(file, m2m_ctx, vma);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        unsigned int ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
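
/*
 * Likewise for the file operations of a hypothetical driver:
 *
 *      static const struct v4l2_file_operations my_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_open,
 *              .release        = my_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 */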