// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)

/* The job queue is not running new jobs */
#define QUEUE_PAUSED		(1 << 0)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
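
/*
 * Informal sketch (not part of the original code): a CAPTURE buffer simply
 * reports its MMAP offset shifted up by DST_QUEUE_OFF_BASE, so the two queues
 * can be told apart again at mmap() time, roughly:
 *
 *	if (offset < DST_QUEUE_OFF_BASE)
 *		vq = v4l2_m2m_get_src_vq(m2m_ctx);	// OUTPUT buffer
 *	else
 *		vq = v4l2_m2m_get_dst_vq(m2m_ctx);	// CAPTURE buffer, rebase the offset
 *
 * See v4l2_m2m_querybuf() and v4l2_m2m_mmap() below for the actual handling.
 */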

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf_devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 */

struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;
	unsigned long		job_queue_flags;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/*
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
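
/*
 * Usage sketch (hypothetical driver code, not part of this framework): besides
 * the qbuf/streamon helpers below, a driver that implements .job_ready() may
 * call v4l2_m2m_try_schedule() itself whenever the condition it checks could
 * have changed. All my_* names are assumptions for illustration only.
 *
 *	static int my_s_ctrl(struct v4l2_ctrl *ctrl)
 *	{
 *		struct my_ctx *ctx = container_of(ctrl->handler,
 *						  struct my_ctx, hdl);
 *
 *		ctx->some_param = ctrl->val;
 *		// The context may have become ready now, so try again.
 *		v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
 *		return 0;
 *	}
 */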

/*
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
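
/*
 * Usage sketch (hypothetical driver code, not part of this framework): the
 * usual place to call v4l2_m2m_job_finish() is the driver's interrupt handler
 * or completion callback, after the buffers of the finished job have been
 * returned. All my_* names are assumptions for illustration only.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */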

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);

void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);

void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);

/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder drivers start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If start streaming again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder driver stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark next dst buffer as
		 * done or flag next one to be marked as done either
		 * in encoder/decoder driver buf_queue() callback using
		 * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if encoder/decoder is not yet streaming
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);

static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device, but was asked to stop, mark the previously queued
	 * buffer as DONE with LAST flag since it won't be queued on the
	 * device.
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list)))
		return EPOLLERR;

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities
	 * and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
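
/*
 * Usage sketch (hypothetical driver code, not part of this framework): drivers
 * that have a media device usually register the media-controller topology
 * right after registering their video device, and tear it down again on
 * removal. All my_/dev-> names are assumptions for illustration only.
 *
 *	ret = video_register_device(&dev->vfd, VFL_TYPE_VIDEO, -1);
 *	if (ret)
 *		goto err;
 *	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
 *					MEDIA_ENT_F_PROC_VIDEO_SCALER);
 *	if (ret)
 *		goto err_unreg_vdev;
 */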

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
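
/*
 * Usage sketch (hypothetical driver code, not part of this framework): a
 * mem2mem driver fills in a struct v4l2_m2m_ops and creates the per-device
 * context once, typically at probe time. All my_* names are assumptions for
 * illustration only.
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,		// optional
 *		.job_abort	= my_job_abort,		// optional
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */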

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
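
/*
 * Usage sketch (hypothetical driver code, not part of this framework): the
 * context is usually created in the driver's open() file operation, with a
 * queue_init callback that configures both vb2 queues and points them at the
 * same mutex. All my_* names are assumptions for illustration only.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *		// ...configure dst_vq the same way with the CAPTURE type...
 *		return vb2_queue_init(src_vq) ?: vb2_queue_init(dst_vq);
 *	}
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 */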

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
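
/*
 * Usage sketch (hypothetical driver code, not part of this framework):
 * device_run() implementations commonly copy the OUTPUT buffer metadata to
 * the CAPTURE buffer before starting the hardware. All my_* names are
 * assumptions for illustration only.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *		my_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */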

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is removed.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
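
/*
 * Usage sketch (hypothetical driver code, not part of this framework): the
 * fop and ioctl helpers above are normally wired straight into the driver's
 * v4l2_file_operations and v4l2_ioctl_ops tables. All my_* names are
 * assumptions for illustration only.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		// ...format handling ioctls...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */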