/*
 * TI OMAP3 ISP - Video buffers queue handling
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ispqueue.h"
/* -----------------------------------------------------------------------------
 * Video buffers management
 */
/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is Cache Invalidation across
 * the (user space) buffer address range. And this _must_ be done
 * at QBUF stage (and *only* at QBUF).
 *
 * We try to use the optimal cache invalidation function:
 *
 * - dmac_map_area:
 *    - used when the number of pages is _low_.
 *    - it becomes quite slow as the number of pages increases.
 *       - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
 *       - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 *
 * - flush_cache_all:
 *    - used when the number of pages is _high_.
 *    - time taken is in the range of 500-900 us.
 *    - has a higher penalty but, as the whole dcache + icache is
 *      invalidated, the cost doesn't grow with the buffer size.
 */
/*
 * FIXME: dmac_inv_range crashes randomly on the user space buffer
 *        address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX	0
static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	if (buf->skip_cache)
		return;

	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}
/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This prevents the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed, as the buffer pages have been locked
 * into memory by a call to get_user_pages(), but the userspace mapping could
 * still disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
	struct vm_area_struct *vma;
	unsigned long start;
	unsigned long end;
	int ret = 0;

	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
		return 0;

	/* We can be called from workqueue context if the current task dies to
	 * unlock the VMAs. In that case there's no current memory management
	 * context so unlocking can't be performed, but the VMAs have been or
	 * are getting destroyed anyway so it doesn't really matter.
	 */
	if (!current || !current->mm)
		return lock ? -EINVAL : 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_write(&current->mm->mmap_sem);
	spin_lock(&current->mm->page_table_lock);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL) {
			ret = -EFAULT;
			goto out;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	if (lock)
		buf->vm_flags |= VM_LOCKED;
	else
		buf->vm_flags &= ~VM_LOCKED;

out:
	spin_unlock(&current->mm->page_table_lock);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
 *
 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 * page.
 */
static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int npages;
	unsigned int i;
	void *addr;

	addr = buf->vaddr;
	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;

	sglist = vmalloc(npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, npages);

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		if (page == NULL || PageHighMem(page)) {
			vfree(sglist);
			return -EINVAL;
		}

		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
	}

	buf->sglen = npages;
	buf->sglist = sglist;

	return 0;
}
/*
 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
 *
 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
 */
static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i) {
		if (PageHighMem(buf->pages[i])) {
			vfree(sglist);
			return -EINVAL;
		}

		sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
			    offset);
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}
/*
 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
 *
 * Create a scatter list of physically contiguous pages starting at the buffer
 * memory physical address.
 */
static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
{
	struct scatterlist *sglist;
	unsigned int offset = buf->offset;
	unsigned long pfn = buf->paddr >> PAGE_SHIFT;
	unsigned int i;

	sglist = vmalloc(buf->npages * sizeof(*sglist));
	if (sglist == NULL)
		return -ENOMEM;

	sg_init_table(sglist, buf->npages);

	for (i = 0; i < buf->npages; ++i, ++pfn) {
		sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
			    offset);
		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
		 * manually.
		 */
		sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
		offset = 0;
	}

	buf->sglen = buf->npages;
	buf->sglist = sglist;

	return 0;
}
/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call to isp_video_buffer_prepare_user and free the
 * pages table.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	unsigned int i;

	if (buf->queue->ops->buffer_cleanup)
		buf->queue->ops->buffer_cleanup(buf);

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
			     direction);
	}

	vfree(buf->sglist);
	buf->sglist = NULL;
	buf->sglen = 0;

	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}
/*
 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
 *
 * This function creates a list of pages for a userspace VMA. The number of
 * pages is first computed based on the buffer size, and pages are then
 * retrieved by a call to get_user_pages.
 *
 * Pages are pinned to memory by get_user_pages, making them available for DMA
 * transfers. However, due to memory management optimizations, it seems that
 * get_user_pages doesn't guarantee that the pinned pages will not be written
 * to swap and removed from the userspace mapping(s). When this happens, a page
 * fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
	unsigned long data;
	unsigned int first;
	unsigned int last;
	int ret;

	data = buf->vbuf.m.userptr;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;

	buf->offset = data & ~PAGE_MASK;
	buf->npages = last - first + 1;
	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
	if (buf->pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
			     buf->npages,
			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
			     buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != buf->npages) {
		buf->npages = ret < 0 ? 0 : ret;
		isp_video_buffer_cleanup(buf);
		return -EFAULT;
	}

	ret = isp_video_buffer_lock_vma(buf, 1);
	if (ret < 0)
		isp_video_buffer_cleanup(buf);

	return ret;
}
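
/*
 * Worked example (editor's illustration, values chosen arbitrarily): with
 * PAGE_SIZE = 4096, a userspace buffer at userptr = 0x40001100 with
 * length = 10000 bytes yields first = 0x40001 and last = 0x40003, so
 * npages = 3 pages get pinned and offset = 0x100 is the position of the
 * first byte of data within the first page.
 */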
/*
 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA.
 *
 * Return 0 if the buffer is valid, or -EFAULT otherwise.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	unsigned long prev_pfn;
	unsigned long this_pfn;
	unsigned long start;
	unsigned long end;
	dma_addr_t pa = 0;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	buf->offset = start & ~PAGE_MASK;
	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	buf->pages = NULL;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (vma == NULL || vma->vm_end < end)
		goto done;

	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			goto done;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1) {
			ret = -EFAULT;
			goto done;
		}

		prev_pfn = this_pfn;
	}

	buf->paddr = pa + buf->offset;
	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care about at the
 * moment is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	pgprot_t vm_page_prot;
	unsigned long start;
	unsigned long end;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_read(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL)
			goto done;

		if (start == buf->vbuf.m.userptr) {
			buf->vm_flags = vma->vm_flags;
			vm_page_prot = vma->vm_page_prot;
		}

		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
			goto done;

		if (vm_page_prot != vma->vm_page_prot)
			goto done;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Skip cache management to enhance performance for non-cached or
	 * write-combining buffers.
	 */
	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
	    vm_page_prot == pgprot_writecombine(vm_page_prot))
		buf->skip_cache = true;

	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * exits).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	enum dma_data_direction direction;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_sglist_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP) {
			ret = isp_video_buffer_prepare_pfnmap(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_pfnmap(buf);
		} else {
			ret = isp_video_buffer_prepare_user(buf);
			if (ret < 0)
				return ret;

			ret = isp_video_buffer_sglist_user(buf);
		}
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
				 direction);
		if (ret != buf->sglen) {
			ret = -EFAULT;
			goto done;
		}

		ret = 0;
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0)
		isp_video_buffer_cleanup(buf);

	return ret;
}
/*
 * isp_video_buffer_query - Query the status of a given buffer
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
		break;
	}
}
/*
 * isp_video_buffer_wait - Wait for a buffer to be ready
 *
 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 *
 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
 * queue using the same condition.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != ISP_BUF_STATE_QUEUED &&
			buf->state != ISP_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != ISP_BUF_STATE_QUEUED &&
		buf->state != ISP_BUF_STATE_ACTIVE);
}
/* -----------------------------------------------------------------------------
 * Queue management
 */
/*
 * isp_video_queue_free - Free video buffers memory
 *
 * Buffers can only be freed if the queue isn't streaming and if no buffer is
 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_free(struct isp_video_queue *queue)
{
	unsigned int i;

	if (queue->streaming)
		return -EBUSY;

	for (i = 0; i < queue->count; ++i) {
		if (queue->buffers[i]->vma_use_count != 0)
			return -EBUSY;
	}

	for (i = 0; i < queue->count; ++i) {
		struct isp_video_buffer *buf = queue->buffers[i];

		isp_video_buffer_cleanup(buf);

		vfree(buf->vaddr);
		buf->vaddr = NULL;

		kfree(buf);
		queue->buffers[i] = NULL;
	}

	INIT_LIST_HEAD(&queue->queue);
	queue->count = 0;
	return 0;
}
/*
 * isp_video_queue_alloc - Allocate video buffers memory
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start by freeing the buffers. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		return 0;

	/* Initialize the allocated buffers. */
	for (i = 0; i < nbuffers; ++i) {
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* Allocate video buffers memory for mmap mode. Align
			 * the size to the page size.
			 */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return i;
}
/**
 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 * @queue: Video buffers queue
 *
 * Free all allocated resources and clean up the video buffers queue. The queue
 * must not be busy (no ongoing video stream) and buffers must have been
 * unmapped.
 *
 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 * unmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	return isp_video_queue_free(queue);
}
/**
 * omap3isp_video_queue_init - Initialize the video buffers queue
 * @queue: Video buffers queue
 * @type: V4L2 buffer type (capture or output)
 * @ops: Driver-specific queue operations
 * @dev: Device used for DMA operations
 * @bufsize: Size of the driver-specific buffer structure
 *
 * Initialize the video buffers queue with the supplied parameters.
 *
 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 *
 * Buffer objects will be allocated using the given buffer size to allow room
 * for driver-specific fields. Driver-specific buffer structures must start
 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 * structure must pass the size of the isp_video_buffer structure in the
 * bufsize parameter.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_init(struct isp_video_queue *queue,
			      enum v4l2_buf_type type,
			      const struct isp_video_queue_operations *ops,
			      struct device *dev, unsigned int bufsize)
{
	INIT_LIST_HEAD(&queue->queue);
	mutex_init(&queue->lock);
	spin_lock_init(&queue->irqlock);

	queue->type = type;
	queue->ops = ops;
	queue->dev = dev;
	queue->bufsize = bufsize;

	return 0;
}
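
/*
 * Usage example (editor's sketch, not part of the original driver): a client
 * video node embeds a struct isp_video_queue, provides the queue operations
 * used by this file (queue_prepare, buffer_prepare, buffer_queue and
 * buffer_cleanup) and initializes the queue at registration time. The
 * my_video_* names below are hypothetical, and the callbacks are assumed to
 * have the signatures implied by the calls made in this file.
 *
 *	static void my_video_queue_prepare(struct isp_video_queue *queue,
 *					   unsigned int *nbuffers,
 *					   unsigned int *size)
 *	{
 *		*size = my_video_buffer_size;
 *	}
 *
 *	static void my_video_buffer_queue(struct isp_video_buffer *buf)
 *	{
 *		my_video_hw_queue_buffer(buf);
 *	}
 *
 *	static const struct isp_video_queue_operations my_video_queue_ops = {
 *		.queue_prepare = my_video_queue_prepare,
 *		.buffer_queue = my_video_buffer_queue,
 *	};
 *
 *	ret = omap3isp_video_queue_init(&my_video->queue,
 *					V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *					&my_video_queue_ops, my_video->dev,
 *					sizeof(struct isp_video_buffer));
 */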
/* -----------------------------------------------------------------------------
 * V4L2 operations
 */

/**
 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 *
 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 *
 * If the number of buffers is 0, all buffers are freed and the function returns
 * without performing any allocation.
 *
 * If the number of buffers is not 0, currently allocated buffers (if any) are
 * freed and the requested number of buffers are allocated. Depending on
 * driver-specific requirements and on memory availability, a number of buffers
 * smaller or bigger than requested can be allocated. This isn't considered an
 * error.
 *
 * Return 0 on success or one of the following error codes:
 *
 * -EINVAL if the buffer type or index are invalid
 * -EBUSY if the queue is busy (streaming or buffers mapped)
 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
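
/*
 * Example (editor's sketch): a client driver would typically wire this helper
 * straight into its V4L2 ioctl operations. The my_video_* container below is
 * hypothetical.
 *
 *	static int my_video_reqbufs(struct file *file, void *fh,
 *				    struct v4l2_requestbuffers *rb)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_reqbufs(&my_video->queue, rb);
 *	}
 */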
/**
 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 *
 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 * returns the status of a given video buffer.
 *
 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
				  struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	int ret = 0;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	buf = queue->buffers[vbuf->index];
	isp_video_buffer_query(buf, vbuf);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
/**
 * omap3isp_video_queue_qbuf - Queue a buffer
 *
 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity checked. If
 * sane, the buffer is then processed and added to the main queue and, if the
 * queue is streaming, to the IRQ queue.
 *
 * Before being enqueued, USERPTR buffers are checked for address changes. If
 * the buffer has a different userspace address, the old memory area is unlocked
 * and the new memory area is locked.
 */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
			      struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	int ret = -EINVAL;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count)
		goto done;

	buf = queue->buffers[vbuf->index];

	if (vbuf->memory != buf->vbuf.memory)
		goto done;

	if (buf->state != ISP_BUF_STATE_IDLE)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->length < buf->vbuf.length)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->m.userptr != buf->vbuf.m.userptr) {
		isp_video_buffer_cleanup(buf);
		buf->vbuf.m.userptr = vbuf->m.userptr;
		buf->prepared = 0;
	}

	if (!buf->prepared) {
		ret = isp_video_buffer_prepare(buf);
		if (ret < 0)
			goto done;
		buf->prepared = 1;
	}

	isp_video_buffer_cache_sync(buf);

	buf->state = ISP_BUF_STATE_QUEUED;
	list_add_tail(&buf->stream, &queue->queue);

	if (queue->streaming) {
		spin_lock_irqsave(&queue->irqlock, flags);
		queue->ops->buffer_queue(buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
/**
 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 *
 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity checked. If
 * sane, the buffer at the front of the main queue is waited for (unless
 * non-blocking mode is selected), removed from the queue and returned to
 * userspace with its status and flags updated.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
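
/*
 * Example (editor's sketch): typical QBUF/DQBUF wiring in a client driver,
 * with a hypothetical my_video_device container.
 *
 *	static int my_video_qbuf(struct file *file, void *fh,
 *				 struct v4l2_buffer *b)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_qbuf(&my_video->queue, b);
 *	}
 *
 *	static int my_video_dqbuf(struct file *file, void *fh,
 *				  struct v4l2_buffer *b)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_dqbuf(&my_video->queue, b,
 *						  file->f_flags & O_NONBLOCK);
 *	}
 */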
/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}
/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}
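
/*
 * Example (editor's sketch): the ordering a client driver is expected to
 * follow on STREAMOFF, per the comment above. The my_video_* helpers and the
 * irq member are hypothetical stand-ins for the driver's own hardware
 * control code.
 *
 *	static int my_video_streamoff(struct file *file, void *fh,
 *				      enum v4l2_buf_type type)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		my_video_hw_stop(my_video);
 *		synchronize_irq(my_video->irq);
 *		omap3isp_video_queue_streamoff(&my_video->queue);
 *
 *		return 0;
 *	}
 */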
/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};
/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
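
/*
 * Example (editor's sketch): hooking the helper into the video device's mmap
 * file operation, with a hypothetical my_video_device container.
 *
 *	static int my_video_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_mmap(&my_video->queue, vma);
 *	}
 */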
/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);
	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);
	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
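
/*
 * Example (editor's sketch): using the helper from the video device's poll
 * file operation, with a hypothetical my_video_device container.
 *
 *	static unsigned int my_video_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_video_device *my_video = video_drvdata(file);
 *
 *		return omap3isp_video_queue_poll(&my_video->queue, file, wait);
 *	}
 */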