// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the
 * same offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE      (1 << 30)

enum v4l2_m2m_entity_type {
        MEM2MEM_ENT_TYPE_SOURCE,
        MEM2MEM_ENT_TYPE_SINK,
        MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
        "source",
        "sink",
        "proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:       &struct media_entity pointer with the source entity
 *                Used only when the M2M device is registered via
 *                v4l2_m2m_register_media_controller().
 * @source_pad:   &struct media_pad with the source pad.
 *                Used only when the M2M device is registered via
 *                v4l2_m2m_register_media_controller().
 * @sink:         &struct media_entity with the sink entity
 *                Used only when the M2M device is registered via
 *                v4l2_m2m_register_media_controller().
 * @sink_pad:     &struct media_pad with the sink pad.
 *                Used only when the M2M device is registered via
 *                v4l2_m2m_register_media_controller().
 * @proc:         &struct media_entity with the M2M device itself.
 * @proc_pads:    &struct media_pad with the @proc pads.
 *                Used only when the M2M device is registered via
 *                v4l2_m2m_register_media_controller().
 * @intf_devnode: &struct media_intf devnode pointer with the interface
 *                that controls the M2M device.
 * @curr_ctx:     currently running instance
 * @job_queue:    instances queued to run
 * @job_spinlock: protects job_queue
 * @job_work:     worker to run queued jobs.
 * @m2m_ops:      driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_entity     *source;
        struct media_pad        source_pad;
        struct media_entity     sink;
        struct media_pad        sink_pad;
        struct media_entity     proc;
        struct media_pad        proc_pads[2];
        struct media_intf_devnode *intf_devnode;
#endif

        struct list_head        job_queue;
        spinlock_t              job_spinlock;
        struct work_struct      job_work;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

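/*
 * Note: drivers usually reach this helper through the v4l2_m2m_next_src_buf()
 * and v4l2_m2m_next_dst_buf() wrappers from <media/v4l2-mem2mem.h>, which
 * pass the context's OUTPUT and CAPTURE queue context respectively, e.g.:
 *
 *      struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(m2m_ctx);
 */
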
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags_job, flags_out, flags_cap;

        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                dprintk("Aborted context\n");
                goto job_unlock;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                dprintk("On job queue already\n");
                goto job_unlock;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                dprintk("No input buffers available\n");
                goto out_unlock;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                dprintk("No output buffers available\n");
                goto cap_unlock;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
            && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                dprintk("Driver not ready\n");
                goto job_unlock;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
        return;

cap_unlock:
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
        struct v4l2_m2m_dev *m2m_dev =
                container_of(work, struct v4l2_m2m_dev, job_work);

        v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->m2m_ops->job_abort)
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                           !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

        /* We might be running in atomic context,
         * but the job must be run in non-atomic context.
         */
        schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

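/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * interrupt handler typically returns the processed buffers and then calls
 * v4l2_m2m_job_finish() so the next job can be scheduled.  "struct my_dev"
 * and "struct my_ctx" (a driver context embedding a struct v4l2_fh) are
 * hypothetical.
 *
 *      static irqreturn_t my_device_isr(int irq, void *priv)
 *      {
 *              struct my_dev *dev = priv;
 *              struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *              v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *              return IRQ_HANDLED;
 *      }
 */
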
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and is
           no longer the owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
            (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                dprintk("%s: requests cannot be used with capture buffers\n",
                        __func__);
                return -EPERM;
        }
        ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
        if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
                                       struct v4l2_m2m_ctx *m2m_ctx,
                                       struct poll_table_struct *wait)
{
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        /*
         * There has to be at least one buffer queued on each queued_list,
         * which means either in driver already or waiting for driver to
         * claim it and start processing.
         */
        if ((!src_q->streaming || src_q->error ||
             list_empty(&src_q->queued_list)) &&
            (!dst_q->streaming || dst_q->error ||
             list_empty(&dst_q->queued_list)))
                return EPOLLERR;

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return EPOLLIN | EPOLLRDNORM;
                }
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                       || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                       || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        __poll_t rc = 0;

        if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
                rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                poll_wait(file, &fh->wait, wait);
                if (v4l2_event_pending(fh))
                        rc |= EPOLLPRI;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
        media_devnode_remove(m2m_dev->intf_devnode);

        media_entity_remove_links(m2m_dev->source);
        media_entity_remove_links(&m2m_dev->sink);
        media_entity_remove_links(&m2m_dev->proc);
        media_device_unregister_entity(m2m_dev->source);
        media_device_unregister_entity(&m2m_dev->sink);
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->source->name);
        kfree(m2m_dev->sink.name);
        kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
        struct video_device *vdev, int function)
{
        struct media_entity *entity;
        struct media_pad *pads;
        char *name;
        unsigned int len;
        int num_pads;
        int ret;

        switch (type) {
        case MEM2MEM_ENT_TYPE_SOURCE:
                entity = m2m_dev->source;
                pads = &m2m_dev->source_pad;
                pads[0].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_SINK:
                entity = &m2m_dev->sink;
                pads = &m2m_dev->sink_pad;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_PROC:
                entity = &m2m_dev->proc;
                pads = m2m_dev->proc_pads;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                pads[1].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 2;
                break;
        default:
                return -EINVAL;
        }

        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
        if (type != MEM2MEM_ENT_TYPE_PROC) {
                entity->info.dev.major = VIDEO_MAJOR;
                entity->info.dev.minor = vdev->minor;
        }
        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
        entity->name = name;
        entity->function = function;

        ret = media_entity_pads_init(entity, num_pads, pads);
        if (ret)
                return ret;
        ret = media_device_register_entity(mdev, entity);
        if (ret)
                return ret;

        return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        struct media_device *mdev = vdev->v4l2_dev->mdev;
        struct media_link *link;
        int ret;

        if (!mdev)
                return 0;

        /*
         * A memory-to-memory device consists of two DMA engine entities and
         * one video processing entity. The DMA engine entities are linked to
         * a V4L interface.
         */

        /* Create the three entities with their pads */
        m2m_dev->source = &vdev->entity;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                return ret;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
        if (ret)
                goto err_rel_entity0;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                goto err_rel_entity1;

        /* Connect the three entities */
        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rel_entity2;

        ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rm_links0;

        /* Create video interface */
        m2m_dev->intf_devnode = media_devnode_create(mdev,
                        MEDIA_INTF_T_V4L_VIDEO, 0,
                        VIDEO_MAJOR, vdev->minor);
        if (!m2m_dev->intf_devnode) {
                ret = -ENOMEM;
                goto err_rm_links1;
        }

        /* Connect the two DMA engines to the interface */
        link = media_create_intf_link(m2m_dev->source,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_devnode;
        }

        link = media_create_intf_link(&m2m_dev->sink,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_intf_link;
        }
        return 0;

err_rm_intf_link:
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
        media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
        media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
        media_entity_remove_links(&m2m_dev->proc);
        media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->proc.name);
err_rel_entity1:
        media_device_unregister_entity(&m2m_dev->sink);
        kfree(m2m_dev->sink.name);
err_rel_entity0:
        media_device_unregister_entity(m2m_dev->source);
        kfree(m2m_dev->source->name);
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

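/*
 * Usage sketch (illustrative only): a driver with Media Controller support
 * typically registers the M2M topology right after registering its video
 * device; "my_vdev", "my_mdev" and the processing function chosen here are
 * hypothetical.
 *
 *      ret = v4l2_m2m_register_media_controller(my_m2m_dev, my_vdev,
 *                      MEDIA_ENT_F_PROC_VIDEO_SCALER);
 *      if (ret)
 *              goto err_unregister_video;
 *      ret = media_device_register(&my_mdev);
 */
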
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);
        INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

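/*
 * Usage sketch (illustrative only): a driver supplies its scheduling
 * callbacks through struct v4l2_m2m_ops and creates the per-device context
 * once, typically at probe time; "my_device_run", "my_job_abort" and
 * "my_dev" are hypothetical.
 *
 *      static const struct v4l2_m2m_ops my_m2m_ops = {
 *              .device_run     = my_device_run,
 *              .job_abort      = my_job_abort,
 *      };
 *
 *      my_dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *      if (IS_ERR(my_dev->m2m_dev))
 *              return PTR_ERR(my_dev->m2m_dev);
 */
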
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq,
                                  struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;
        /*
         * Both queues should use the same mutex to lock the m2m context.
         * This lock is used in some v4l2_m2m_* helpers.
         */
        if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
                ret = -EINVAL;
                goto err;
        }
        m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

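/*
 * Usage sketch (illustrative only): the context is normally created in the
 * driver's open() file operation, with a queue_init callback that fills in
 * both vb2 queues and makes them share one mutex.  "struct my_ctx" (which
 * embeds a struct v4l2_fh), "my_dev" and "my_queue_init" are hypothetical;
 * error unwinding is omitted for brevity.
 *
 *      static int my_open(struct file *file)
 *      {
 *              struct my_dev *dev = video_drvdata(file);
 *              struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *              if (!ctx)
 *                      return -ENOMEM;
 *              v4l2_fh_init(&ctx->fh, video_devdata(file));
 *              file->private_data = &ctx->fh;
 *              ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *                                                  my_queue_init);
 *              if (IS_ERR(ctx->fh.m2m_ctx))
 *                      return PTR_ERR(ctx->fh.m2m_ctx);
 *              v4l2_fh_add(&ctx->fh);
 *              return 0;
 *      }
 */
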
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                        struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

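/*
 * Usage sketch (illustrative only): drivers normally forward their vb2
 * .buf_queue operation to this helper; "my_buf_queue" and "struct my_ctx"
 * are hypothetical.
 *
 *      static void my_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *              struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *      }
 */
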
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
                                struct vb2_v4l2_buffer *cap_vb,
                                bool copy_frame_flags)
{
        u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

        if (copy_frame_flags)
                mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
                        V4L2_BUF_FLAG_BFRAME;

        cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

        if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
                cap_vb->timecode = out_vb->timecode;
        cap_vb->field = out_vb->field;
        cap_vb->flags &= ~mask;
        cap_vb->flags |= out_vb->flags & mask;
        cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

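/*
 * Usage sketch (illustrative only): a typical .device_run callback peeks at
 * the next source and destination buffers, propagates the buffer metadata
 * and then starts the hardware; "my_hw_start" and "struct my_ctx" are
 * hypothetical.
 *
 *      static void my_device_run(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *              v4l2_m2m_buf_copy_metadata(src, dst, true);
 *              my_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *      }
 */
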
void v4l2_m2m_request_queue(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;

        /*
         * Queue all objects. Note that buffer objects are at the end of the
         * objects list, after all other object types. Once buffer objects
         * are queued, the driver might delete them immediately (if the driver
         * processes the buffer at once), so we have to use
         * list_for_each_entry_safe() to handle the case where the object we
         * queue is deleted.
         */
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                struct v4l2_m2m_ctx *m2m_ctx_obj;
                struct vb2_buffer *vb;

                if (!obj->ops->queue)
                        continue;

                if (vb2_request_object_is_buffer(obj)) {
                        /* Sanity checks */
                        vb = container_of(obj, struct vb2_buffer, req_obj);
                        WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
                        m2m_ctx_obj = container_of(vb->vb2_queue,
                                                   struct v4l2_m2m_ctx,
                                                   out_q_ctx.q);
                        WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
                        m2m_ctx = m2m_ctx_obj;
                }

                /*
                 * The buffer we queue here can in theory be immediately
                 * unbound, hence the use of list_for_each_entry_safe()
                 * above and why we call the queue op last.
                 */
                obj->ops->queue(obj);
        }

        WARN_ON(!m2m_ctx);

        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

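/*
 * Usage sketch (illustrative only): request-capable mem2mem drivers normally
 * wire this helper, together with vb2_request_validate(), into their
 * struct media_device_ops; "my_m2m_media_ops" is hypothetical.
 *
 *      static const struct media_device_ops my_m2m_media_ops = {
 *              .req_validate   = vb2_request_validate,
 *              .req_queue      = v4l2_m2m_request_queue,
 *      };
 */
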
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                           struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                               struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                            struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                        struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                         struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                          struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                            enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                             enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
                                   struct v4l2_encoder_cmd *ec)
{
        if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
                return -EINVAL;

        ec->flags = 0;
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
                                   struct v4l2_decoder_cmd *dc)
{
        if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
                return -EINVAL;

        dc->flags = 0;

        if (dc->cmd == V4L2_DEC_CMD_STOP) {
                dc->stop.pts = 0;
        } else if (dc->cmd == V4L2_DEC_CMD_START) {
                dc->start.speed = 0;
                dc->start.format = V4L2_DEC_START_FMT_NONE;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);

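/*
 * Usage sketch (illustrative only): a mem2mem driver can plug most of the
 * helpers above straight into its operation tables, keeping only the format
 * negotiation and open/release handling driver-specific.  "my_open",
 * "my_release" and the table names are hypothetical.
 *
 *      static const struct v4l2_file_operations my_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_open,
 *              .release        = my_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 *
 *      static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_prepare_buf     = v4l2_m2m_ioctl_prepare_buf,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */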