/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should) \
        do { \
                if (unlikely((is) != (should))) { \
                        printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
                               is, should); \
                        BUG(); \
                } \
        } while (0)
static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...) \
        do { \
                if (debug >= level) \
                        printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
        } while (0)
/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...) \
        ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
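/*
 * Allocate one buffer header through the memory backend (int_ops).  The
 * backend allocates q->msize bytes, so the driver-private structure that
 * embeds struct videobuf_buffer fits in the same allocation.
 */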
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
        struct videobuf_buffer *vb;

        BUG_ON(q->msize < sizeof(*vb));

        if (!q->int_ops || !q->int_ops->alloc_vb) {
                printk(KERN_ERR "No specific ops defined!\n");
                BUG();
        }

        vb = q->int_ops->alloc_vb(q->msize);
        if (NULL != vb) {
                init_waitqueue_head(&vb->done);
                vb->magic = MAGIC_BUFFER;
        }

        return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
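/*
 * Note: despite its name, this helper returns true once the buffer is
 * *neither* ACTIVE nor QUEUED, i.e. once the driver is done with it; the
 * waiters below use it as their wake-up condition.
 */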
static int is_state_active_or_queued(struct videobuf_queue *q,
                                     struct videobuf_buffer *vb)
{
        unsigned long flags;
        bool rc;

        spin_lock_irqsave(q->irqlock, flags);
        rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
        spin_unlock_irqrestore(q->irqlock, flags);

        return rc;
}
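/*
 * Wait until the driver has finished with the buffer (it left the QUEUED
 * and ACTIVE states).  @intr selects an interruptible wait; @non_blocking
 * turns the call into a plain state check.  If the caller holds q->ext_lock
 * (the vdev lock) it is dropped around the wait so other file operations on
 * the device are not blocked.
 */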
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
                int non_blocking, int intr)
{
        bool is_ext_locked;
        int ret = 0;

        MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

        if (non_blocking) {
                if (is_state_active_or_queued(q, vb))
                        return 0;
                return -EAGAIN;
        }

        is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

        /* Release vdev lock to prevent this wait from blocking outside access to
           the device. */
        if (is_ext_locked)
                mutex_unlock(q->ext_lock);

        if (intr)
                ret = wait_event_interruptible(vb->done,
                                is_state_active_or_queued(q, vb));
        else
                wait_event(vb->done, is_state_active_or_queued(q, vb));

        /* Relock */
        if (is_ext_locked)
                mutex_lock(q->ext_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
                struct v4l2_framebuffer *fbuf)
{
        MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);
void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
                              struct videobuf_buffer *buf)
{
        if (q->int_ops->vaddr)
                return q->int_ops->vaddr(buf);
        return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
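/*
 * Core queue initialization.  Drivers normally do not call this directly:
 * the memory-type helpers (videobuf-dma-sg, videobuf-vmalloc, ...) wrap it
 * and supply their own struct videobuf_qtype_ops through @int_ops.
 */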
void videobuf_queue_core_init(struct videobuf_queue *q,
                         const struct videobuf_queue_ops *ops,
                         struct device *dev,
                         spinlock_t *irqlock,
                         enum v4l2_buf_type type,
                         enum v4l2_field field,
                         unsigned int msize,
                         void *priv,
                         struct videobuf_qtype_ops *int_ops,
                         struct mutex *ext_lock)
{
        BUG_ON(!q);
        memset(q, 0, sizeof(*q));
        q->irqlock   = irqlock;
        q->ext_lock  = ext_lock;
        q->dev       = dev;
        q->type      = type;
        q->field     = field;
        q->msize     = msize;
        q->ops       = ops;
        q->priv_data = priv;
        q->int_ops   = int_ops;

        /* All buffer operations are mandatory */
        BUG_ON(!q->ops->buf_setup);
        BUG_ON(!q->ops->buf_prepare);
        BUG_ON(!q->ops->buf_queue);
        BUG_ON(!q->ops->buf_release);

        /* Lock is mandatory for queue_cancel to work */
        BUG_ON(!irqlock);

        /* Having implementations for abstract methods is mandatory */
        BUG_ON(!q->int_ops);

        mutex_init(&q->vb_lock);
        init_waitqueue_head(&q->wait);
        INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
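/*
 * Return nonzero when the queue still owns resources (streaming or read I/O
 * in progress, or any buffer mapped, queued or active) and therefore cannot
 * be reconfigured.
 */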
/* Locking: Only used (unsafely) by bttv; find a way to remove this. */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
        int i;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        if (q->streaming) {
                dprintk(1, "busy: streaming active\n");
                return 1;
        }
        if (q->reading) {
                dprintk(1, "busy: pending read #1\n");
                return 1;
        }
        if (q->read_buf) {
                dprintk(1, "busy: pending read #2\n");
                return 1;
        }
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (NULL == q->bufs[i])
                        continue;
                if (q->bufs[i]->map) {
                        dprintk(1, "busy: buffer #%d mapped\n", i);
                        return 1;
                }
                if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
                        dprintk(1, "busy: buffer #%d queued\n", i);
                        return 1;
                }
                if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
                        dprintk(1, "busy: buffer #%d active\n", i);
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/**
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
        int i;

        dprintk(1, "%s\n", __func__);

        if (q->streaming || q->reading) {
                dprintk(1, "Cannot free buffers when streaming or reading\n");
                return -EBUSY;
        }

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        for (i = 0; i < VIDEO_MAX_FRAME; i++)
                if (q->bufs[i] && q->bufs[i]->map) {
                        dprintk(1, "Cannot free mmapped buffers\n");
                        return -EBUSY;
                }

        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (NULL == q->bufs[i])
                        continue;
                q->ops->buf_release(q, q->bufs[i]);
                kfree(q->bufs[i]);
                q->bufs[i] = NULL;
        }

        return 0;
}
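/*
 * Abort all I/O on the queue: wake up waiters, fail every buffer that is
 * still queued to the driver, and release all buffers.
 */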
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
        unsigned long flags = 0;
        int i;

        q->streaming = 0;
        q->reading = 0;
        wake_up_interruptible_sync(&q->wait);

        /* remove queued buffers from list */
        spin_lock_irqsave(q->irqlock, flags);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (NULL == q->bufs[i])
                        continue;
                if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
                        list_del(&q->bufs[i]->queue);
                        q->bufs[i]->state = VIDEOBUF_ERROR;
                        wake_up_all(&q->bufs[i]->done);
                }
        }
        spin_unlock_irqrestore(q->irqlock, flags);

        /* free all buffers + clear queue */
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (NULL == q->bufs[i])
                        continue;
                q->ops->buf_release(q, q->bufs[i]);
        }
        INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
        enum v4l2_field field = q->field;

        BUG_ON(V4L2_FIELD_ANY == field);

        if (V4L2_FIELD_ALTERNATE == field) {
                if (V4L2_FIELD_TOP == q->last) {
                        field   = V4L2_FIELD_BOTTOM;
                        q->last = V4L2_FIELD_BOTTOM;
                } else {
                        field   = V4L2_FIELD_TOP;
                        q->last = V4L2_FIELD_TOP;
                }
        }
        return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);
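/* Fill a struct v4l2_buffer from the internal buffer state (used by
 * QUERYBUF and DQBUF). */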
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
                            struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
        MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        b->index  = vb->i;
        b->type   = type;
        b->memory = vb->memory;
        switch (b->memory) {
        case V4L2_MEMORY_MMAP:
                b->m.offset  = vb->boff;
                b->length    = vb->bsize;
                break;
        case V4L2_MEMORY_USERPTR:
                b->m.userptr = vb->baddr;
                b->length    = vb->bsize;
                break;
        case V4L2_MEMORY_OVERLAY:
                b->m.offset  = vb->boff;
                break;
        case V4L2_MEMORY_DMABUF:
                /* DMABUF is not handled in videobuf framework */
                break;
        }

        b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        if (vb->map)
                b->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (vb->state) {
        case VIDEOBUF_PREPARED:
        case VIDEOBUF_QUEUED:
        case VIDEOBUF_ACTIVE:
                b->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case VIDEOBUF_ERROR:
                b->flags |= V4L2_BUF_FLAG_ERROR;
                /* fall through */
        case VIDEOBUF_DONE:
                b->flags |= V4L2_BUF_FLAG_DONE;
                break;
        case VIDEOBUF_NEEDS_INIT:
        case VIDEOBUF_IDLE:
                /* nothing */
                break;
        }

        b->field     = vb->field;
        b->timestamp = vb->ts;
        b->bytesused = vb->size;
        b->sequence  = vb->field_count >> 1;
}
int videobuf_mmap_free(struct videobuf_queue *q)
{
        int ret;

        videobuf_queue_lock(q);
        ret = __videobuf_free(q);
        videobuf_queue_unlock(q);

        return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
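/*
 * Allocate up to @bcount buffer headers of @bsize bytes each and set up
 * their per-buffer offsets; returns the number of buffers actually
 * allocated (or a negative errno).
 */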
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
                        unsigned int bcount, unsigned int bsize,
                        enum v4l2_memory memory)
{
        unsigned int i;
        int err;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        err = __videobuf_free(q);
        if (0 != err)
                return err;

        /* Allocate and initialize buffers */
        for (i = 0; i < bcount; i++) {
                q->bufs[i] = videobuf_alloc_vb(q);

                if (NULL == q->bufs[i])
                        break;

                q->bufs[i]->i      = i;
                q->bufs[i]->memory = memory;
                q->bufs[i]->bsize  = bsize;
                switch (memory) {
                case V4L2_MEMORY_MMAP:
                        q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
                        break;
                case V4L2_MEMORY_USERPTR:
                case V4L2_MEMORY_OVERLAY:
                case V4L2_MEMORY_DMABUF:
                        /* nothing */
                        break;
                }
        }

        if (!i)
                return -ENOMEM;

        dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

        return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
                        unsigned int bcount, unsigned int bsize,
                        enum v4l2_memory memory)
{
        int ret;

        videobuf_queue_lock(q);
        ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
        videobuf_queue_unlock(q);

        return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
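/*
 * Helper for the VIDIOC_REQBUFS ioctl: validates the request, asks the
 * driver (buf_setup) for its preferred count/size and allocates the buffer
 * headers.  A typical driver handler simply forwards to it, e.g. (sketch,
 * the my_* names are illustrative only):
 *
 *      static int my_reqbufs(struct file *file, void *priv,
 *                            struct v4l2_requestbuffers *p)
 *      {
 *              struct my_dev *dev = video_drvdata(file);
 *
 *              return videobuf_reqbufs(&dev->vb_queue, p);
 *      }
 */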
int videobuf_reqbufs(struct videobuf_queue *q,
                struct v4l2_requestbuffers *req)
{
        unsigned int size, count;
        int retval;

        if (req->count < 1) {
                dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
                return -EINVAL;
        }

        if (req->memory != V4L2_MEMORY_MMAP &&
            req->memory != V4L2_MEMORY_USERPTR &&
            req->memory != V4L2_MEMORY_OVERLAY) {
                dprintk(1, "reqbufs: memory type invalid\n");
                return -EINVAL;
        }

        videobuf_queue_lock(q);
        if (req->type != q->type) {
                dprintk(1, "reqbufs: queue type invalid\n");
                retval = -EINVAL;
                goto done;
        }

        if (q->streaming) {
                dprintk(1, "reqbufs: streaming already exists\n");
                retval = -EBUSY;
                goto done;
        }
        if (!list_empty(&q->stream)) {
                dprintk(1, "reqbufs: stream running\n");
                retval = -EBUSY;
                goto done;
        }

        count = req->count;
        if (count > VIDEO_MAX_FRAME)
                count = VIDEO_MAX_FRAME;
        size = 0;
        q->ops->buf_setup(q, &count, &size);
        dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
                count, size,
                (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

        retval = __videobuf_mmap_setup(q, count, size, req->memory);
        if (retval < 0) {
                dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
                goto done;
        }

        req->count = retval;
        retval = 0;

done:
        videobuf_queue_unlock(q);
        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
        int ret = -EINVAL;

        videobuf_queue_lock(q);
        if (unlikely(b->type != q->type)) {
                dprintk(1, "querybuf: Wrong type.\n");
                goto done;
        }
        if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
                dprintk(1, "querybuf: index out of range.\n");
                goto done;
        }
        if (unlikely(NULL == q->bufs[b->index])) {
                dprintk(1, "querybuf: buffer is null.\n");
                goto done;
        }

        videobuf_status(q, b, q->bufs[b->index], q->type);

        ret = 0;
done:
        videobuf_queue_unlock(q);
        return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
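/*
 * Helper for the VIDIOC_QBUF ioctl: validates the buffer, prepares it via
 * the driver's buf_prepare() and adds it to the stream list (queueing it to
 * the hardware immediately when streaming is already on).
 */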
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
        struct videobuf_buffer *buf;
        enum v4l2_field field;
        unsigned long flags = 0;
        int retval;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        if (b->memory == V4L2_MEMORY_MMAP)
                down_read(&current->mm->mmap_sem);

        videobuf_queue_lock(q);
        retval = -EBUSY;
        if (q->reading) {
                dprintk(1, "qbuf: Reading running...\n");
                goto done;
        }
        retval = -EINVAL;
        if (b->type != q->type) {
                dprintk(1, "qbuf: Wrong type.\n");
                goto done;
        }
        if (b->index >= VIDEO_MAX_FRAME) {
                dprintk(1, "qbuf: index out of range.\n");
                goto done;
        }
        buf = q->bufs[b->index];
        if (NULL == buf) {
                dprintk(1, "qbuf: buffer is null.\n");
                goto done;
        }
        MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
        if (buf->memory != b->memory) {
                dprintk(1, "qbuf: memory type is wrong.\n");
                goto done;
        }
        if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
                dprintk(1, "qbuf: buffer is already queued or active.\n");
                goto done;
        }

        switch (b->memory) {
        case V4L2_MEMORY_MMAP:
                if (0 == buf->baddr) {
                        dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
                        goto done;
                }
                if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
                    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
                    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
                        buf->size  = b->bytesused;
                        buf->field = b->field;
                        buf->ts    = b->timestamp;
                }
                break;
        case V4L2_MEMORY_USERPTR:
                if (b->length < buf->bsize) {
                        dprintk(1, "qbuf: buffer length is not enough\n");
                        goto done;
                }
                if (VIDEOBUF_NEEDS_INIT != buf->state &&
                    buf->baddr != b->m.userptr)
                        q->ops->buf_release(q, buf);
                buf->baddr = b->m.userptr;
                break;
        case V4L2_MEMORY_OVERLAY:
                buf->boff = b->m.offset;
                break;
        default:
                dprintk(1, "qbuf: wrong memory type\n");
                goto done;
        }

        dprintk(1, "qbuf: requesting next field\n");
        field = videobuf_next_field(q);
        retval = q->ops->buf_prepare(q, buf, field);
        if (0 != retval) {
                dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
                goto done;
        }

        list_add_tail(&buf->stream, &q->stream);
        if (q->streaming) {
                spin_lock_irqsave(q->irqlock, flags);
                q->ops->buf_queue(q, buf);
                spin_unlock_irqrestore(q->irqlock, flags);
        }
        dprintk(1, "qbuf: succeeded\n");
        retval = 0;
        wake_up_interruptible_sync(&q->wait);

done:
        videobuf_queue_unlock(q);

        if (b->memory == V4L2_MEMORY_MMAP)
                up_read(&current->mm->mmap_sem);

        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
        int retval;

checks:
        if (!q->streaming) {
                dprintk(1, "next_buffer: Not streaming\n");
                retval = -EINVAL;
                goto done;
        }

        if (list_empty(&q->stream)) {
                if (noblock) {
                        retval = -EAGAIN;
                        dprintk(2, "next_buffer: no buffers to dequeue\n");
                        goto done;
                } else {
                        dprintk(2, "next_buffer: waiting on buffer\n");

                        /* Drop lock to avoid deadlock with qbuf */
                        videobuf_queue_unlock(q);

                        /* Checking list_empty and streaming is safe without
                         * locks because we goto checks to validate while
                         * holding locks before proceeding */
                        retval = wait_event_interruptible(q->wait,
                                !list_empty(&q->stream) || !q->streaming);
                        videobuf_queue_lock(q);

                        if (retval)
                                goto done;

                        goto checks;
                }
        }

        retval = 1;

done:
        return retval;
}
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
                        struct videobuf_buffer **vb, int nonblocking)
{
        int retval;
        struct videobuf_buffer *buf = NULL;

        retval = stream_next_buffer_check_queue(q, nonblocking);
        if (retval < 0)
                goto done;

        buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
        retval = videobuf_waiton(q, buf, nonblocking, 1);
        if (retval < 0)
                goto done;

        *vb = buf;
done:
        return retval;
}
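/*
 * Helper for the VIDIOC_DQBUF ioctl: waits for the next buffer on the
 * stream list to complete, syncs it and returns its status to userspace.
 * Drivers typically call it as, e.g. (illustrative sketch):
 *
 *      videobuf_dqbuf(&dev->vb_queue, b, file->f_flags & O_NONBLOCK);
 */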
int videobuf_dqbuf(struct videobuf_queue *q,
               struct v4l2_buffer *b, int nonblocking)
{
        struct videobuf_buffer *buf = NULL;
        int retval;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        memset(b, 0, sizeof(*b));
        videobuf_queue_lock(q);

        retval = stream_next_buffer(q, &buf, nonblocking);
        if (retval < 0) {
                dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
                goto done;
        }

        switch (buf->state) {
        case VIDEOBUF_ERROR:
                dprintk(1, "dqbuf: state is error\n");
                break;
        case VIDEOBUF_DONE:
                dprintk(1, "dqbuf: state is done\n");
                break;
        default:
                dprintk(1, "dqbuf: state invalid\n");
                retval = -EINVAL;
                goto done;
        }
        CALL(q, sync, q, buf);
        videobuf_status(q, b, buf, q->type);
        list_del(&buf->stream);
        buf->state = VIDEOBUF_IDLE;
        b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
        videobuf_queue_unlock(q);
        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
int videobuf_streamon(struct videobuf_queue *q)
{
        struct videobuf_buffer *buf;
        unsigned long flags = 0;
        int retval;

        videobuf_queue_lock(q);
        retval = -EBUSY;
        if (q->reading)
                goto done;
        retval = 0;
        if (q->streaming)
                goto done;
        q->streaming = 1;
        spin_lock_irqsave(q->irqlock, flags);
        list_for_each_entry(buf, &q->stream, stream)
                if (buf->state == VIDEOBUF_PREPARED)
                        q->ops->buf_queue(q, buf);
        spin_unlock_irqrestore(q->irqlock, flags);

        wake_up_interruptible_sync(&q->wait);
done:
        videobuf_queue_unlock(q);
        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
        if (!q->streaming)
                return -EINVAL;

        videobuf_queue_cancel(q);

        return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
        int retval;

        videobuf_queue_lock(q);
        retval = __videobuf_streamoff(q);
        videobuf_queue_unlock(q);

        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
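/*
 * read() fast path: capture directly into the user buffer by treating it as
 * a USERPTR buffer, so no bounce copy is needed.  Only used when a whole
 * frame is requested in one blocking read.
 */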
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
                                      char __user *data,
                                      size_t count, loff_t *ppos)
{
        enum v4l2_field field;
        unsigned long flags = 0;
        int retval;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        /* setup stuff */
        q->read_buf = videobuf_alloc_vb(q);
        if (NULL == q->read_buf)
                return -ENOMEM;

        q->read_buf->memory = V4L2_MEMORY_USERPTR;
        q->read_buf->baddr  = (unsigned long)data;
        q->read_buf->bsize  = count;

        field = videobuf_next_field(q);
        retval = q->ops->buf_prepare(q, q->read_buf, field);
        if (0 != retval)
                goto done;

        /* start capture & wait */
        spin_lock_irqsave(q->irqlock, flags);
        q->ops->buf_queue(q, q->read_buf);
        spin_unlock_irqrestore(q->irqlock, flags);
        retval = videobuf_waiton(q, q->read_buf, 0, 0);
        if (0 == retval) {
                CALL(q, sync, q, q->read_buf);
                if (VIDEOBUF_ERROR == q->read_buf->state)
                        retval = -EIO;
                else
                        retval = q->read_buf->size;
        }

done:
        /* cleanup */
        q->ops->buf_release(q, q->read_buf);
        kfree(q->read_buf);
        q->read_buf = NULL;
        return retval;
}
static int __videobuf_copy_to_user(struct videobuf_queue *q,
                                struct videobuf_buffer *buf,
                                char __user *data, size_t count,
                                int nonblocking)
{
        void *vaddr = CALL(q, vaddr, buf);

        /* copy to userspace */
        if (count > buf->size - q->read_off)
                count = buf->size - q->read_off;

        if (copy_to_user(data, vaddr + q->read_off, count))
                return -EFAULT;

        return count;
}
static int __videobuf_copy_stream(struct videobuf_queue *q,
                                struct videobuf_buffer *buf,
                                char __user *data, size_t count, size_t pos,
                                int vbihack, int nonblocking)
{
        unsigned int *fc = CALL(q, vaddr, buf);

        if (vbihack) {
                /* dirty, undocumented hack -- pass the frame counter
                 * within the last four bytes of each vbi data block.
                 * We need that one to maintain backward compatibility
                 * to all vbi decoding software out there ... */
                fc += (buf->size >> 2) - 1;
                *fc = buf->field_count >> 1;
                dprintk(1, "vbihack: %d\n", *fc);
        }

        /* copy stuff using the common method */
        count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

        if ((count == -EFAULT) && (pos == 0))
                return -EFAULT;

        return count;
}
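/*
 * Implement the read() file operation for drivers that return one frame per
 * call: try the zero-copy path first, otherwise capture into a temporary
 * USERPTR buffer and copy the data out, possibly across several reads
 * (q->read_off tracks the progress).
 */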
ssize_t videobuf_read_one(struct videobuf_queue *q,
                          char __user *data, size_t count, loff_t *ppos,
                          int nonblocking)
{
        enum v4l2_field field;
        unsigned long flags = 0;
        unsigned size = 0, nbufs = 1;
        int retval;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        videobuf_queue_lock(q);

        q->ops->buf_setup(q, &nbufs, &size);

        if (NULL == q->read_buf &&
            count >= size &&
            !nonblocking) {
                retval = videobuf_read_zerocopy(q, data, count, ppos);
                if (retval >= 0 || retval == -EIO)
                        /* ok, all done */
                        goto done;
                /* fallback to kernel bounce buffer on failures */
        }

        if (NULL == q->read_buf) {
                /* need to capture a new frame */
                retval = -ENOMEM;
                q->read_buf = videobuf_alloc_vb(q);

                dprintk(1, "video alloc=0x%p\n", q->read_buf);
                if (NULL == q->read_buf)
                        goto done;
                q->read_buf->memory = V4L2_MEMORY_USERPTR;
                q->read_buf->bsize = count; /* preferred size */
                field = videobuf_next_field(q);
                retval = q->ops->buf_prepare(q, q->read_buf, field);

                if (0 != retval) {
                        kfree(q->read_buf);
                        q->read_buf = NULL;
                        goto done;
                }

                spin_lock_irqsave(q->irqlock, flags);
                q->ops->buf_queue(q, q->read_buf);
                spin_unlock_irqrestore(q->irqlock, flags);

                q->read_off = 0;
        }

        /* wait until capture is done */
        retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
        if (0 != retval)
                goto done;

        CALL(q, sync, q, q->read_buf);

        if (VIDEOBUF_ERROR == q->read_buf->state) {
                /* catch I/O errors */
                q->ops->buf_release(q, q->read_buf);
                kfree(q->read_buf);
                q->read_buf = NULL;
                retval = -EIO;
                goto done;
        }

        /* Copy to userspace */
        retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
        if (retval < 0)
                goto done;

        q->read_off += retval;
        if (q->read_off == q->read_buf->size) {
                /* all data copied, cleanup */
                q->ops->buf_release(q, q->read_buf);
                kfree(q->read_buf);
                q->read_buf = NULL;
        }

done:
        videobuf_queue_unlock(q);
        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
        enum v4l2_field field;
        unsigned long flags = 0;
        unsigned int count = 0, size = 0;
        int err, i;

        q->ops->buf_setup(q, &count, &size);
        if (count > VIDEO_MAX_FRAME)
                count = VIDEO_MAX_FRAME;
        size = PAGE_ALIGN(size);

        err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
        if (err < 0)
                return err;

        count = err;

        for (i = 0; i < count; i++) {
                field = videobuf_next_field(q);
                err = q->ops->buf_prepare(q, q->bufs[i], field);
                if (err)
                        return err;
                list_add_tail(&q->bufs[i]->stream, &q->stream);
        }
        spin_lock_irqsave(q->irqlock, flags);
        for (i = 0; i < count; i++)
                q->ops->buf_queue(q, q->bufs[i]);
        spin_unlock_irqrestore(q->irqlock, flags);

        q->reading = 1;
        return 0;
}
static void __videobuf_read_stop(struct videobuf_queue *q)
{
        int i;

        videobuf_queue_cancel(q);
        INIT_LIST_HEAD(&q->stream);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (NULL == q->bufs[i])
                        continue;
                kfree(q->bufs[i]);
                q->bufs[i] = NULL;
        }
        q->read_buf = NULL;
}
int videobuf_read_start(struct videobuf_queue *q)
{
        int rc;

        videobuf_queue_lock(q);
        rc = __videobuf_read_start(q);
        videobuf_queue_unlock(q);

        return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
        videobuf_queue_lock(q);
        __videobuf_read_stop(q);
        videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
        videobuf_queue_lock(q);

        if (q->streaming)
                __videobuf_streamoff(q);

        if (q->reading)
                __videobuf_read_stop(q);

        videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
                             char __user *data, size_t count, loff_t *ppos,
                             int vbihack, int nonblocking)
{
        int rc, retval;
        unsigned long flags = 0;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        dprintk(2, "%s\n", __func__);
        videobuf_queue_lock(q);
        retval = -EBUSY;
        if (q->streaming)
                goto done;
        if (!q->reading) {
                retval = __videobuf_read_start(q);
                if (retval < 0)
                        goto done;
        }

        retval = 0;
        while (count > 0) {
                /* get / wait for data */
                if (NULL == q->read_buf) {
                        q->read_buf = list_entry(q->stream.next,
                                                 struct videobuf_buffer,
                                                 stream);
                        list_del(&q->read_buf->stream);
                        q->read_off = 0;
                }
                rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
                if (rc < 0) {
                        if (0 == retval)
                                retval = rc;
                        break;
                }

                if (q->read_buf->state == VIDEOBUF_DONE) {
                        rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
                                        retval, vbihack, nonblocking);
                        if (rc < 0) {
                                retval = rc;
                                break;
                        }
                        retval      += rc;
                        count       -= rc;
                        q->read_off += rc;
                } else {
                        /* some error */
                        q->read_off = q->read_buf->size;
                        if (0 == retval)
                                retval = -EIO;
                }

                /* requeue buffer when done with copying */
                if (q->read_off == q->read_buf->size) {
                        list_add_tail(&q->read_buf->stream,
                                      &q->stream);
                        spin_lock_irqsave(q->irqlock, flags);
                        q->ops->buf_queue(q, q->read_buf);
                        spin_unlock_irqrestore(q->irqlock, flags);
                        q->read_buf = NULL;
                }
                if (retval < 0)
                        break;
        }

done:
        videobuf_queue_unlock(q);
        return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
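/*
 * poll() helper: picks the buffer at the head of the stream list (starting
 * read I/O first if the caller polls for input without streaming) and
 * reports POLLIN/POLLOUT once that buffer is done.
 */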
unsigned int videobuf_poll_stream(struct file *file,
                                  struct videobuf_queue *q,
                                  poll_table *wait)
{
        unsigned long req_events = poll_requested_events(wait);
        struct videobuf_buffer *buf = NULL;
        unsigned int rc = 0;

        videobuf_queue_lock(q);
        if (q->streaming) {
                if (!list_empty(&q->stream))
                        buf = list_entry(q->stream.next,
                                         struct videobuf_buffer, stream);
        } else if (req_events & (POLLIN | POLLRDNORM)) {
                if (!q->reading)
                        __videobuf_read_start(q);
                if (!q->reading) {
                        rc = POLLERR;
                } else if (NULL == q->read_buf) {
                        q->read_buf = list_entry(q->stream.next,
                                                 struct videobuf_buffer,
                                                 stream);
                        list_del(&q->read_buf->stream);
                        q->read_off = 0;
                }
                buf = q->read_buf;
        }
        if (!buf)
                rc = POLLERR;

        if (0 == rc) {
                poll_wait(file, &buf->done, wait);
                if (buf->state == VIDEOBUF_DONE ||
                    buf->state == VIDEOBUF_ERROR) {
                        switch (q->type) {
                        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
                        case V4L2_BUF_TYPE_VBI_OUTPUT:
                        case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
                                rc = POLLOUT | POLLWRNORM;
                                break;
                        default:
                                rc = POLLIN | POLLRDNORM;
                                break;
                        }
                }
        }
        videobuf_queue_unlock(q);
        return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
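/*
 * mmap() helper: find the MMAP buffer whose offset matches vma->vm_pgoff
 * and let the memory backend establish the mapping.
 */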
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
        int rc = -EINVAL;
        int i;

        MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

        if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
                dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
                return -EINVAL;
        }

        videobuf_queue_lock(q);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                struct videobuf_buffer *buf = q->bufs[i];

                if (buf && buf->memory == V4L2_MEMORY_MMAP &&
                    buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
                        rc = CALL(q, mmap_mapper, q, buf, vma);
                        break;
                }
        }
        videobuf_queue_unlock(q);

        return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);