/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
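
/*
 * For example (illustrative values, not taken from a real driver): if a
 * buffer on each queue has the per-queue mmap offset 0x1000, userspace sees
 * 0x1000 for the OUTPUT (source) buffer but 0x40001000 (0x1000 +
 * DST_QUEUE_OFF_BASE) for the CAPTURE (destination) buffer; v4l2_m2m_mmap()
 * subtracts the base again before handing the offset back to videobuf.
 */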
/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	struct v4l2_m2m_ops	*m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}
/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers
 * and return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
				       struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction (a sketch of such a callback follows the
 * function below).
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
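
/*
 * A minimal sketch of the custom job_ready callback mentioned above
 * (hypothetical driver: struct my_ctx and the two-buffer requirement are
 * illustrative, only v4l2_m2m_num_src_bufs_ready() is framework API):
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *	}
 */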
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
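
/*
 * A typical driver pairs its device_run() callback with an interrupt handler
 * that calls v4l2_m2m_job_finish() once the transaction completes. A sketch
 * under assumed names (struct my_ctx, struct my_dev and my_hw_start() are
 * hypothetical; the v4l2_m2m_*() and vb2_buffer_done() calls are real API):
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *		my_hw_start(ctx, src, dst);	(kick off DMA, do not block)
 *	}
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *
 *		vb2_buffer_done(v4l2_m2m_src_buf_remove(ctx->m2m_ctx),
 *				VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(v4l2_m2m_dst_buf_remove(ctx->m2m_ctx),
 *				VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */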
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
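
/*
 * Drivers usually wire these helpers straight into their ioctl ops. A sketch
 * (hypothetical driver: struct my_ctx and fh2ctx() are illustrative):
 *
 *	static int vidioc_qbuf(struct file *file, void *priv,
 *			       struct v4l2_buffer *buf)
 *	{
 *		struct my_ctx *ctx = fh2ctx(priv);
 *
 *		return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
 *	}
 *
 *	static int vidioc_dqbuf(struct file *file, void *priv,
 *				struct v4l2_buffer *buf)
 *	{
 *		struct my_ctx *ctx = fh2ctx(priv);
 *
 *		return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 *	}
 */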
/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
/**
 * v4l2_m2m_poll() - poll replacement, for source and destination buffers
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read readiness
 * is signalled in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
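
/*
 * A driver's file_operations poll can usually delegate here directly. A
 * sketch (hypothetical driver: struct my_ctx is illustrative):
 *
 *	static unsigned int my_poll(struct file *file,
 *				    struct poll_table_struct *wait)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 */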
/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
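
/*
 * Likewise for mmap. A sketch (hypothetical driver: struct my_ctx is
 * illustrative):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */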
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops)
		return ERR_PTR(-EINVAL);

	BUG_ON(!m2m_ops->device_run);
	BUG_ON(!m2m_ops->job_abort);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
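
/*
 * A probe() would typically provide the callbacks and keep the returned
 * handle. A sketch (hypothetical driver: the my_* names are illustrative;
 * device_run and job_abort are the only mandatory ops):
 *
 *	static struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */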
/**
 * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 * be used for initializing videobuf_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
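
/*
 * A sketch of the queue_init callback and its use from open() (hypothetical
 * driver: struct my_ctx and my_vb2_ops are illustrative; the vb2 fields and
 * vb2_queue_init() are real API):
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP;
 *		src_vq->drv_priv = priv;
 *		src_vq->ops = &my_vb2_ops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP;
 *		dst_vq->drv_priv = priv;
 *		dst_vq->ops = &my_vb2_ops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * and then, from open():
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */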
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
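
/*
 * The matching release() side (hypothetical driver: struct my_ctx is
 * illustrative):
 *
 *	static int my_release(struct file *file)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		v4l2_m2m_ctx_release(ctx->m2m_ctx);
 *		kfree(ctx);
 *		return 0;
 *	}
 */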
/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the buf_queue() videobuf_queue_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
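
/*
 * A driver's vb2 buf_queue op is usually a one-liner on top of this helper.
 * A sketch (hypothetical driver: struct my_ctx is illustrative;
 * vb2_get_drv_priv() is real API):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */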