/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"

/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};
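
/*
 * omap3isp_video_format_info - Look up the format descriptor for a media bus code
 * @code: media bus format code (MEDIA_BUS_FMT_*)
 *
 * Return the isp_format_info entry matching @code. A zero code matches the
 * empty sentinel entry at the end of the table; unknown non-zero codes return
 * NULL.
 */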
const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at the end of the line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
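
/*
 * Example of the computation done by isp_video_mbus_to_pix(), with purely
 * illustrative values: a 2592-pixel wide line in a 10-bit Bayer format
 * (2 bytes per pixel) has min_bpl = 2592 * 2 = 5184. With bpl_max == 0 the
 * requested bytesperline is ignored and bpl = 5184; otherwise a request of,
 * say, 6000 is clamped to [5184, bpl_max], aligned to bpl_alignment, and the
 * difference bpl - min_bpl is returned as the line padding.
 */

/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the media bus code matching the pixel
 * format, falling back to the last (empty) table entry if no match is found.
 */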
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}
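
/*
 * isp_video_remote_subdev - Return the subdev connected to a video node
 * @video: ISP video instance
 * @pad: where to store the index of the remote pad (may be NULL)
 *
 * Return the V4L2 subdevice at the remote end of the link connected to the
 * video node pad, or NULL if the pad isn't linked to a subdevice.
 */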
static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Walk the pipeline graph, record the entities that belong to it and locate
 * the ISP video node at the far end of the pipeline, if any.
 */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct isp_video *__video;

		pipe->entities |= 1 << entity->id;

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	mutex_unlock(&mdev->graph_mutex);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}
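
/*
 * __isp_video_get_format - Retrieve the active format at the video node pad
 * @video: ISP video instance
 * @format: where to store the retrieved format
 *
 * Query the active format on the remote subdev pad connected to the video
 * node and convert it to a V4L2 pixel format.
 */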
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}
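
/*
 * isp_video_check_format - Check the file handle format against the subdev
 * @video: ISP video instance
 * @vfh: ISP video file handle
 *
 * Return 0 if the format stored in the file handle matches the format
 * currently configured on the connected subdev pad, or -EINVAL otherwise.
 */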
static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */
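
/*
 * isp_video_queue_setup - Negotiate buffer count and plane sizes with vb2
 *
 * A single plane per buffer is used; the plane size is taken from the format
 * stored in the file handle, and the buffer count is limited by the memory
 * reserved for capture (video->capture_mem).
 */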
static int isp_video_queue_setup(struct vb2_queue *queue,
				 const void *parg,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], void *alloc_ctxs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	alloc_ctxs[0] = video->alloc_ctx;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error; this check is just a
	 * best-effort attempt to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32 byte boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
			      vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}
/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
};
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	v4l2_get_timestamp(&buf->vb.timestamp);

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}
/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream marks all buffers on the video node as erroneous and
 * makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);

	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}

	video->error = true;

	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if it's in
 * continuous mode, and requests the ISP modules to queue the ACTIVE buffer
 * again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}
/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
			  | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard, select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		/* Fall-through */
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default to
		 * progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}
static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fall back to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}
static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}
static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!(pipe->entities & (1 << ents[i]->id)))
			continue;

		/* ISP entities always have their sink pad at index 0. Find the
		 * source.
		 */
		source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt on sink pad failed (%d)\n", ret);
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;

	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}
/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffers queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;

	pipe->entities = 0;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
						   ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto err_set_stream;
		spin_lock_irqsave(&video->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->irqlock, flags);
	}

	mutex_unlock(&video->stream_lock);
	return 0;

err_set_stream:
	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
err_check_format:
	media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but are still present in
	 * the DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	mutex_unlock(&video->stream_lock);
	return ret;
}
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Bail out if the queue isn't streaming. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	/* TODO: Implement PM QoS */
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap	= isp_video_querycap,
	.vidioc_g_fmt_vid_cap	= isp_video_get_format,
	.vidioc_s_fmt_vid_cap	= isp_video_set_format,
	.vidioc_try_fmt_vid_cap	= isp_video_try_format,
	.vidioc_g_fmt_vid_out	= isp_video_get_format,
	.vidioc_s_fmt_vid_out	= isp_video_set_format,
	.vidioc_try_fmt_vid_out	= isp_video_try_format,
	.vidioc_cropcap		= isp_video_cropcap,
	.vidioc_g_crop		= isp_video_get_crop,
	.vidioc_s_crop		= isp_video_set_crop,
	.vidioc_g_parm		= isp_video_get_param,
	.vidioc_s_parm		= isp_video_set_param,
	.vidioc_reqbufs		= isp_video_reqbufs,
	.vidioc_querybuf	= isp_video_querybuf,
	.vidioc_qbuf		= isp_video_qbuf,
	.vidioc_dqbuf		= isp_video_dqbuf,
	.vidioc_streamon	= isp_video_streamon,
	.vidioc_streamoff	= isp_video_streamoff,
	.vidioc_enum_input	= isp_video_enum_input,
	.vidioc_g_input		= isp_video_g_input,
	.vidioc_s_input		= isp_video_s_input,
};
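
/*
 * The handlers above implement the standard V4L2 streaming I/O sequence. An
 * illustrative (not driver-specific) userspace capture loop looks roughly
 * like:
 *
 *	fd = open("/dev/videoN", O_RDWR);
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *	ioctl(fd, VIDIOC_REQBUFS, &req);	// then mmap() each buffer
 *	ioctl(fd, VIDIOC_QBUF, &buf);		// queue all buffers
 *	ioctl(fd, VIDIOC_STREAMON, &type);
 *	while (streaming)
 *		ioctl(fd, VIDIOC_DQBUF, &buf);	// process, then re-queue
 *	ioctl(fd, VIDIOC_STREAMOFF, &type);
 *
 * On the OMAP3 ISP the pipeline links and subdev formats must additionally be
 * configured through the media controller and subdev pad APIs before
 * streaming is started.
 */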
/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */
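
/*
 * isp_video_open - Open handler for the ISP video nodes
 *
 * Allocate a per-file handle, power up the ISP and mark the pipeline as in
 * use, then initialise the videobuf2 queue backing this file handle. On any
 * failure the handle is freed and the error is returned to userspace.
 */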
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};
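
/*
 * omap3isp_video_init - Initialise an ISP video node
 * @video: ISP video instance
 * @name: name used to build the video device name
 *
 * Set up the media pad direction according to the buffer type, allocate the
 * DMA contiguous allocation context, and initialise the media entity, locks
 * and video_device fields. The node is registered separately with
 * omap3isp_video_register().
 */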
int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
	if (IS_ERR(video->alloc_ctx))
		return PTR_ERR(video->alloc_ctx);

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0) {
		vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
		return ret;
	}

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);
}