/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
#include <media/v4l2-common.h>
#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)
/* --------------------------------------------------------------------- */
#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
				state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);
void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
void videobuf_queue_core_init(struct videobuf_queue *q,
			      const struct videobuf_queue_ops *ops,
			      struct device *dev,
			      spinlock_t *irqlock,
			      enum v4l2_buf_type type,
			      enum v4l2_field field,
			      unsigned int msize,
			      void *priv,
			      struct videobuf_qtype_ops *int_ops,
			      struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for abstract methods are mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
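
/*
 * Example (illustrative sketch only, not part of this file): a memory
 * backend would wrap this core init and supply its own
 * struct videobuf_qtype_ops. The mydrv_* names below are assumptions
 * used for illustration, not existing kernel symbols.
 *
 *	void mydrv_queue_init(struct videobuf_queue *q,
 *			      const struct videobuf_queue_ops *ops,
 *			      struct device *dev, spinlock_t *irqlock,
 *			      enum v4l2_buf_type type, enum v4l2_field field,
 *			      unsigned int msize, void *priv,
 *			      struct mutex *ext_lock)
 *	{
 *		videobuf_queue_core_init(q, ops, dev, irqlock, type, field,
 *					 msize, priv, &mydrv_qtype_ops,
 *					 ext_lock);
 *	}
 */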
/* Locking: Only usage in bttv is unsafe; find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/*
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */
/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	case V4L2_MEMORY_DMABUF:
		/* DMABUF is not handled in videobuf framework */
		break;
	}

	b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}
int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			  unsigned int bcount, unsigned int bsize,
			  enum v4l2_memory memory)
{
	int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
		case V4L2_MEMORY_DMABUF:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
		     struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	if (req->count == 0) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		retval = __videobuf_free(q);
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
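
/*
 * Example (illustrative sketch only, not part of this file): the buf_setup()
 * callback a driver provides in its struct videobuf_queue_ops, which
 * videobuf_reqbufs() above consults to clamp the buffer count and learn the
 * per-buffer size. The mydrv_* names and MYDRV_FRAME_SIZE constant are
 * assumptions for illustration.
 *
 *	static int mydrv_buf_setup(struct videobuf_queue *q,
 *				   unsigned int *count, unsigned int *size)
 *	{
 *		*size = MYDRV_FRAME_SIZE;	// hypothetical frame size
 *		if (*count < 2)			// keep at least two in flight
 *			*count = 2;
 *		return 0;
 *	}
 */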
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
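
/*
 * Example (illustrative userspace sketch, not part of this file): the ioctl
 * sequence an application would use against a driver built on these helpers
 * to request and map MMAP buffers. Error handling omitted; fd, i and mem[]
 * are assumed to exist in the caller.
 *
 *	struct v4l2_requestbuffers req = {
 *		.count  = 4,
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	ioctl(fd, VIDIOC_REQBUFS, &req);	// handled by videobuf_reqbufs()
 *
 *	for (i = 0; i < req.count; i++) {
 *		struct v4l2_buffer b = {
 *			.index = i, .type = req.type, .memory = req.memory };
 *		ioctl(fd, VIDIOC_QUERYBUF, &b);	// handled by videobuf_querybuf()
 *		mem[i] = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *			      MAP_SHARED, fd, b.m.offset);
 *	}
 */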
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			      struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}
int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
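
/*
 * Example (illustrative userspace sketch, not part of this file): the
 * streaming I/O loop an application drives on top of videobuf_qbuf(),
 * videobuf_streamon() and videobuf_dqbuf(). Error handling omitted;
 * nbuffers, capturing, mem[] and process() are hypothetical.
 *
 *	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *
 *	for (i = 0; i < nbuffers; i++) {
 *		struct v4l2_buffer b = {
 *			.index = i, .type = type, .memory = V4L2_MEMORY_MMAP };
 *		ioctl(fd, VIDIOC_QBUF, &b);		// videobuf_qbuf()
 *	}
 *	ioctl(fd, VIDIOC_STREAMON, &type);		// videobuf_streamon()
 *
 *	while (capturing) {
 *		struct v4l2_buffer b = {
 *			.type = type, .memory = V4L2_MEMORY_MMAP };
 *		ioctl(fd, VIDIOC_DQBUF, &b);		// videobuf_dqbuf()
 *		process(mem[b.index], b.bytesused);	// hypothetical helper
 *		ioctl(fd, VIDIOC_QBUF, &b);
 *	}
 *	ioctl(fd, VIDIOC_STREAMOFF, &type);		// videobuf_streamoff()
 */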
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}
static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALLPTR(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALLPTR(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
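
/*
 * Example (illustrative sketch, not part of this file): a VBI application
 * that relies on the "vbihack" above can recover the frame counter from the
 * last four bytes of each block it read(); vbi_block and block_size are
 * hypothetical names on the userspace side.
 *
 *	unsigned int fc;
 *	memcpy(&fc, vbi_block + block_size - sizeof(fc), sizeof(fc));
 */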
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
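
/*
 * Example (illustrative userspace sketch, not part of this file): a plain
 * read() on a device wired to videobuf_read_one(). When the supplied buffer
 * is at least one frame large and the call may block, the helper above tries
 * the USERPTR zero-copy path first and falls back to a bounce buffer.
 * FRAME_SIZE is a hypothetical constant.
 *
 *	char frame[FRAME_SIZE];
 *	ssize_t n = read(fd, frame, sizeof(frame));
 */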
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}
static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}
int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
__poll_t videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	__poll_t rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = EPOLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (buf)
		poll_wait(file, &buf->done, wait);
	else
		rc = EPOLLERR;

	if (0 == rc) {
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SDR_OUTPUT:
				rc = EPOLLOUT | EPOLLWRNORM;
				break;
			default:
				rc = EPOLLIN | EPOLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
		    buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);