// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)
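/*
 * Example (informational): for a CAPTURE buffer whose vb2 mmap offset is
 * 0x1000, v4l2_m2m_querybuf() below reports 0x1000 + DST_QUEUE_OFF_BASE
 * to userspace, and v4l2_m2m_mmap() subtracts the base again to select
 * the destination queue before calling vb2_mmap(). OUTPUT-queue offsets
 * are passed through unchanged.
 */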
enum v4l2_m2m_entity_type {
        MEM2MEM_ENT_TYPE_SOURCE,
        MEM2MEM_ENT_TYPE_SINK,
        MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
        "source",
        "sink",
        "proc"
};
/**
 * struct v4l2_m2m_dev - per-device context
 * @source:             &struct media_entity pointer with the source entity
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @source_pad:         &struct media_pad with the source pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink:               &struct media_entity pointer with the sink entity
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink_pad:           &struct media_pad with the sink pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @proc:               &struct media_entity pointer with the M2M device itself.
 * @proc_pads:          &struct media_pad with the @proc pads.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @intf_devnode:       &struct media_intf_devnode pointer with the interface
 *                      that controls the M2M device.
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @job_work:           worker to run queued jobs.
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_entity     *source;
        struct media_pad        source_pad;
        struct media_entity     sink;
        struct media_pad        sink_pad;
        struct media_entity     proc;
        struct media_pad        proc_pads[2];
        struct media_intf_devnode *intf_devnode;
#endif

        struct list_head        job_queue;
        spinlock_t              job_spinlock;
        struct work_struct      job_work;

        const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/**
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags_job, flags_out, flags_cap;

        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                dprintk("Aborted context\n");
                goto job_unlock;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                dprintk("On job queue already\n");
                goto job_unlock;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                dprintk("No input buffers available\n");
                goto out_unlock;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                dprintk("No output buffers available\n");
                goto cap_unlock;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
            && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                dprintk("Driver not ready\n");
                goto job_unlock;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
        return;

cap_unlock:
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/*
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
        struct v4l2_m2m_dev *m2m_dev =
                container_of(work, struct v4l2_m2m_dev, job_work);

        v4l2_m2m_try_run(m2m_dev);
}
/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case streamoff or release is called on any context:
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->m2m_ops->job_abort)
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                           !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

        /* We might be running in atomic context,
         * but the job must be run in non-atomic context.
         */
        schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and is
           no longer the owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
            (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                dprintk("%s: requests cannot be used with capture buffers\n",
                        __func__);
                return -EPERM;
        }
        ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
        if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                poll_wait(file, &fh->wait, wait);
                if (v4l2_event_pending(fh))
                        rc = EPOLLPRI;
                if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
                        return rc;
        }

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || src_q->error ||
             list_empty(&src_q->queued_list)) &&
            (!dst_q->streaming || dst_q->error ||
             list_empty(&dst_q->queued_list))) {
                rc |= EPOLLERR;
                goto end;
        }

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | EPOLLIN | EPOLLRDNORM;
                }
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                       || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                       || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
        media_devnode_remove(m2m_dev->intf_devnode);

        media_entity_remove_links(m2m_dev->source);
        media_entity_remove_links(&m2m_dev->sink);
        media_entity_remove_links(&m2m_dev->proc);
        media_device_unregister_entity(m2m_dev->source);
        media_device_unregister_entity(&m2m_dev->sink);
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->source->name);
        kfree(m2m_dev->sink.name);
        kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
static int v4l2_m2m_register_entity(struct media_device *mdev,
        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
        struct video_device *vdev, int function)
{
        struct media_entity *entity;
        struct media_pad *pads;
        char *name;
        unsigned int len;
        int num_pads;
        int ret;

        switch (type) {
        case MEM2MEM_ENT_TYPE_SOURCE:
                entity = m2m_dev->source;
                pads = &m2m_dev->source_pad;
                pads[0].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_SINK:
                entity = &m2m_dev->sink;
                pads = &m2m_dev->sink_pad;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_PROC:
                entity = &m2m_dev->proc;
                pads = m2m_dev->proc_pads;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                pads[1].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 2;
                break;
        default:
                return -EINVAL;
        }

        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
        if (type != MEM2MEM_ENT_TYPE_PROC) {
                entity->info.dev.major = VIDEO_MAJOR;
                entity->info.dev.minor = vdev->minor;
        }
        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
        entity->name = name;
        entity->function = function;

        ret = media_entity_pads_init(entity, num_pads, pads);
        if (ret)
                return ret;
        ret = media_device_register_entity(mdev, entity);
        if (ret)
                return ret;

        return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        struct media_device *mdev = vdev->v4l2_dev->mdev;
        struct media_link *link;
        int ret;

        if (!mdev)
                return 0;

        /* A memory-to-memory device consists of two DMA engine entities and
         * one video processing entity. The DMA engine entities are linked to
         * a V4L interface.
         */

        /* Create the three entities with their pads */
        m2m_dev->source = &vdev->entity;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                return ret;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
        if (ret)
                goto err_rel_entity0;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                goto err_rel_entity1;

        /* Connect the three entities */
        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rel_entity2;

        ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rm_links0;

        /* Create video interface */
        m2m_dev->intf_devnode = media_devnode_create(mdev,
                        MEDIA_INTF_T_V4L_VIDEO, 0,
                        VIDEO_MAJOR, vdev->minor);
        if (!m2m_dev->intf_devnode) {
                ret = -ENOMEM;
                goto err_rm_links1;
        }

        /* Connect the two DMA engines to the interface */
        link = media_create_intf_link(m2m_dev->source,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_devnode;
        }

        link = media_create_intf_link(&m2m_dev->sink,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_intf_link;
        }
        return 0;

err_rm_intf_link:
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
        media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
        media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
        media_entity_remove_links(&m2m_dev->proc);
        media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->proc.name);
err_rel_entity1:
        media_device_unregister_entity(&m2m_dev->sink);
        kfree(m2m_dev->sink.name);
err_rel_entity0:
        media_device_unregister_entity(m2m_dev->source);
        kfree(m2m_dev->source->name);
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);
        INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;
        /*
         * Both queues should use the same mutex to lock the m2m context.
         * This lock is used in some v4l2_m2m_* helpers.
         */
        if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
                ret = -EINVAL;
                goto err;
        }
        m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                        struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
                                struct vb2_v4l2_buffer *cap_vb,
                                bool copy_frame_flags)
{
        u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        if (copy_frame_flags)
                mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
                        V4L2_BUF_FLAG_BFRAME;

        cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

        if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
                cap_vb->timecode = out_vb->timecode;
        cap_vb->field = out_vb->field;
        cap_vb->flags &= ~mask;
        cap_vb->flags |= out_vb->flags & mask;
        cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
void v4l2_m2m_request_queue(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;

        /*
         * Queue all objects. Note that buffer objects are at the end of the
         * objects list, after all other object types. Once buffer objects
         * are queued, the driver might delete them immediately (if the driver
         * processes the buffer at once), so we have to use
         * list_for_each_entry_safe() to handle the case where the object we
         * queue is removed.
         */
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                struct v4l2_m2m_ctx *m2m_ctx_obj;
                struct vb2_buffer *vb;

                if (!obj->ops->queue)
                        continue;

                if (vb2_request_object_is_buffer(obj)) {
                        /* Sanity checks */
                        vb = container_of(obj, struct vb2_buffer, req_obj);
                        WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
                        m2m_ctx_obj = container_of(vb->vb2_queue,
                                                   struct v4l2_m2m_ctx,
                                                   out_q_ctx.q);
                        WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
                        m2m_ctx = m2m_ctx_obj;
                }

                /*
                 * The buffer we queue here can in theory be immediately
                 * unbound, hence the use of list_for_each_entry_safe()
                 * above and why we call the queue op last.
                 */
                obj->ops->queue(obj);
        }

        WARN_ON(!m2m_ctx);

        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                           struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                               struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                            struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                        struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                         struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                          struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                            enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                             enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
                                   struct v4l2_encoder_cmd *ec)
{
        if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
                return -EINVAL;

        ec->flags = 0;
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
                                   struct v4l2_decoder_cmd *dc)
{
        if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
                return -EINVAL;

        dc->flags = 0;

        if (dc->cmd == V4L2_DEC_CMD_STOP) {
                dc->stop.pts = 0;
        } else if (dc->cmd == V4L2_DEC_CMD_START) {
                dc->start.speed = 0;
                dc->start.format = V4L2_DEC_START_FMT_NONE;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);