/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
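
/*
 * Example of the resulting split (illustrative values, not from this file):
 * if vb2 assigns mem_offset 0x1000 to capture buffer 0, QUERYBUF reports
 * 0x1000 + DST_QUEUE_OFF_BASE to userspace; v4l2_m2m_mmap() sees the large
 * offset, picks the capture queue and subtracts DST_QUEUE_OFF_BASE again
 * before handing the mapping to vb2_mmap().
 */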

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and he
	   is no longer owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		else if (req_events & EPOLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here same lock is used
 * for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);