// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and vb2.
 *
 * Helper functions for devices that use vb2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)

/* The job queue is not running new jobs */
#define QUEUE_PAUSED		(1 << 0)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};
/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;
	unsigned long		job_queue_flags;

	const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/*
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming ||
	    (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
		if (!m2m_ctx->ignore_cap_streaming)
			dprintk("Streaming needs to be on for both queues\n");
		else
			dprintk("Streaming needs to be on for the OUTPUT queue\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dprintk("Timestamp mismatch, returning held capture buffer\n");
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/*
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}
/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}
/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
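
/*
 * Illustrative sketch (not part of this framework): a minimal driver-side
 * .device_run/IRQ-completion pair built on the helpers above. The names
 * my_ctx, my_dev, my_hw_start() and my_irq_handler() are hypothetical and
 * assume the usual "struct v4l2_fh fh" embedding in the driver context;
 * only the v4l2_m2m_* calls are real framework API. Kept under "#if 0" so
 * it never affects the build.
 */
#if 0	/* example only */
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;

	/* Peek at the buffers selected for this job. */
	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	my_hw_start(ctx, src, dst);	/* kick the hardware (hypothetical) */
}

static irqreturn_t my_irq_handler(int irq, void *priv)
{
	struct my_dev *dev = priv;
	struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	struct vb2_v4l2_buffer *src, *dst;

	/* Remove the processed buffers from the ready queues ... */
	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	/* ... return them to userspace ... */
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

	/* ... and finish the job; the framework schedules the next one. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
	return IRQ_HANDLED;
}
#endif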
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
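
/*
 * Illustrative sketch (not part of this framework): how a stateful codec
 * driver's buf_queue() callback typically consumes the draining state
 * described above. "struct my_ctx" and the v4l2_fh embedding are
 * assumptions; v4l2_m2m_dst_buf_is_last(), v4l2_m2m_last_buffer_done()
 * and v4l2_m2m_buf_queue() are the real helpers. Kept under "#if 0".
 */
#if 0	/* example only */
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
	    vb2_is_streaming(vb->vb2_queue) &&
	    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
		unsigned int i;

		/* Draining: return an empty buffer with the LAST flag set. */
		for (i = 0; i < vb->num_planes; i++)
			vb2_set_plane_payload(vb, i, 0);
		vbuf->field = V4L2_FIELD_NONE;
		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
		return;
	}

	/* Normal case: hand the buffer to the m2m ready queue. */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
#endif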
/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}
/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder drivers start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If start streaming again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder driver stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark next dst buffer as
		 * done or flag next one to be marked as done either
		 * in encoder/decoder driver buf_queue() callback using
		 * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if encoder/decoder is not yet streaming
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device, but was asked to stop, mark the previously queued
	 * buffer as DONE with LAST flag since it won't be queued on the
	 * device.
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	   (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!vb2_is_streaming(src_q) || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!vb2_is_streaming(dst_q) || dst_q->error ||
	     (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
		return EPOLLERR;

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in their
	 * events during this first call. Failure to do so will result in
	 * queue's events to be ignored because the poll_table won't be capable
	 * of adding new wait queues thereafter.
	 */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					  unsigned long len, unsigned long pgoff,
					  unsigned long flags)
{
	struct v4l2_fh *fh = file->private_data;
	unsigned long offset = pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
		pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}
	ret = media_device_register_entity(mdev, entity);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}

	return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities
	 * and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
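
/*
 * Illustrative sketch (not part of this framework): typical probe-time
 * usage, assuming a hypothetical driver with "struct my_dev" holding an
 * already initialised video_device "vfd" and "struct v4l2_m2m_ops
 * my_m2m_ops" providing at least .device_run. Only v4l2_m2m_init(),
 * v4l2_m2m_register_media_controller() and v4l2_m2m_release() are real
 * framework API. Kept under "#if 0".
 */
#if 0	/* example only */
static int my_probe_m2m(struct my_dev *dev)
{
	int ret;

	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
	if (IS_ERR(dev->m2m_dev))
		return PTR_ERR(dev->m2m_dev);

	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret) {
		v4l2_m2m_release(dev->m2m_dev);
		return ret;
	}
	return 0;
}
#endif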
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
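
/*
 * Illustrative sketch (not part of this framework): a hypothetical driver
 * open() path creating a per-file m2m context. "struct my_ctx" (embedding
 * a struct v4l2_fh as its first member), "struct my_dev" and
 * my_queue_init() are assumptions; v4l2_m2m_ctx_init() and the v4l2_fh
 * helpers are real API. Kept under "#if 0".
 */
#if 0	/* example only */
static int my_open(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	ctx->dev = dev;

	/*
	 * my_queue_init() fills in both vb2_queue structs and must give
	 * them the same ->lock, as checked by v4l2_m2m_ctx_init().
	 */
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		int ret = PTR_ERR(ctx->fh.m2m_ctx);

		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return ret;
	}

	v4l2_fh_add(&ctx->fh);
	return 0;
}
#endif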
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
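
/*
 * Illustrative sketch (not part of this framework): drivers typically call
 * v4l2_m2m_buf_copy_metadata() from their .device_run callback before
 * starting the hardware, so the CAPTURE buffer inherits the OUTPUT
 * timestamp and frame flags. "struct my_ctx" and the v4l2_fh embedding
 * are assumptions. Kept under "#if 0".
 */
#if 0	/* example only */
static void my_device_run_metadata(struct my_ctx *ctx)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* CAPTURE buffer inherits the OUTPUT timestamp and frame flags. */
	v4l2_m2m_buf_copy_metadata(src, dst, true);
}
#endif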
void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is removed.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *remove)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);

	if (!q)
		return -EINVAL;
	if (q->type != remove->type)
		return -EINVAL;

	return vb2_core_remove_bufs(q, remove->index, remove->count);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
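
/*
 * Illustrative sketch (not part of this framework): a hypothetical
 * driver's v4l2_file_operations table wired to the helpers above.
 * my_open() and my_release() are assumptions; the v4l2_m2m_fop_*
 * helpers and video_ioctl2() are real API. Kept under "#if 0".
 */
#if 0	/* example only */
static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
#endif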