drivers/media/platform/exynos-gsc/gsc-m2m.c
/*
 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Samsung EXYNOS5 SoC series G-Scaler driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"
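
/*
 * gsc_m2m_ctx_stop_req - request that processing of @ctx be stopped
 *
 * If @ctx is the currently running context and a job is pending, set
 * GSC_CTX_STOP_REQ and wait up to GSC_SHUTDOWN_TIMEOUT for the flag to
 * be cleared by the job completion path. Returns 0 when there is
 * nothing to stop, -ETIMEDOUT on timeout, otherwise the remaining
 * jiffies reported by wait_event_timeout().
 */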
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_ctx *curr_ctx;
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
	ret = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return ret == 0 ? -ETIMEDOUT : ret;
}
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
	return ret > 0 ? 0 : ret;
}
static int gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if (ret == -ETIMEDOUT)
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);

	return 0;
}
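
/*
 * gsc_m2m_job_finish - complete the current source/destination buffer pair
 *
 * Removes the next buffers from both m2m queues, propagates the
 * destination timestamp and timecode back to the source buffer, marks
 * both buffers done with @vb_state and signals job completion to the
 * v4l2-mem2mem core.
 */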
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		src_vb->v4l2_buf.timestamp = dst_vb->v4l2_buf.timestamp;
		src_vb->v4l2_buf.timecode = dst_vb->v4l2_buf.timecode;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}
static void gsc_m2m_job_abort(void *priv)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if (ret == -ETIMEDOUT)
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
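
/*
 * gsc_get_bufs - resolve DMA addresses for the next buffer pair
 *
 * Looks up the next source and destination buffers on the m2m queues,
 * fills in ctx->s_frame.addr and ctx->d_frame.addr via gsc_prepare_addr()
 * and copies the source timestamp to the destination buffer.
 */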
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->v4l2_buf.timestamp = src_vb->v4l2_buf.timestamp;

	return 0;
}
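
/*
 * gsc_m2m_device_run - run a single mem-to-mem job
 *
 * Invoked by the v4l2-mem2mem framework once both queues have buffers
 * ready. Programs the input/output addresses, reprograms the complete
 * scaler setup when GSC_PARAMS is set (e.g. after a context switch),
 * updates the shadow registers and starts the hardware.
 */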
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			 gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}
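
/*
 * gsc_m2m_queue_setup - report the number of planes and per-plane sizes
 * for buffer allocation, based on the format set on the queue.
 */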
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
			const struct v4l2_format *fmt,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], void *allocators[])
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vq->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!frame->fmt)
		return -EINVAL;

	*num_planes = frame->fmt->num_planes;
	for (i = 0; i < frame->fmt->num_planes; i++) {
		sizes[i] = frame->payload[i];
		allocators[i] = ctx->gsc_dev->alloc_ctx;
	}
	return 0;
}
static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		for (i = 0; i < frame->fmt->num_planes; i++)
			vb2_set_plane_payload(vb, i, frame->payload[i]);
	}

	return 0;
}
static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
static struct vb2_ops gsc_m2m_qops = {
	.queue_setup	 = gsc_m2m_queue_setup,
	.buf_prepare	 = gsc_m2m_buf_prepare,
	.buf_queue	 = gsc_m2m_buf_queue,
	.wait_prepare	 = gsc_unlock,
	.wait_finish	 = gsc_lock,
	.stop_streaming	 = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};
static int gsc_m2m_querycap(struct file *file, void *fh,
			    struct v4l2_capability *cap)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
	strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
	strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt_mplane(f);
}

static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_g_fmt_mplane(ctx, f);
}

static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_try_fmt_mplane(ctx, f);
}
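
/*
 * gsc_m2m_s_fmt_mplane - set the source or destination format
 *
 * Rejects the call if the queue is streaming, stores the negotiated
 * format and per-plane payload sizes in the corresponding frame and
 * flags the context for hardware reconfiguration (GSC_PARAMS).
 */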
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}
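
/*
 * gsc_m2m_reqbufs - allocate or release buffers; the count is limited by
 * the variant's in_buf_cnt/out_buf_cnt, and a count of zero clears the
 * corresponding format state flag.
 */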
static int gsc_m2m_reqbufs(struct file *file, void *fh,
			   struct v4l2_requestbuffers *reqbufs)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct gsc_frame *frame;
	u32 max_cnt;

	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
	if (reqbufs->count > max_cnt) {
		return -EINVAL;
	} else if (reqbufs->count == 0) {
		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
			gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
		else
			gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
	}

	frame = ctx_get_frame(ctx, reqbufs->type);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
static int gsc_m2m_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}

static int gsc_m2m_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	/* The source and target color format need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
static int gsc_m2m_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}
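
/*
 * gsc_m2m_g_selection - return the full frame size for the DEFAULT and
 * BOUNDS targets, or the active crop rectangle for CROP/COMPOSE.
 */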
static int gsc_m2m_g_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->f_width;
		s->r.height = frame->f_height;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		s->r.left = frame->crop.left;
		s->r.top = frame->crop.top;
		s->r.width = frame->crop.width;
		s->r.height = frame->crop.height;
		return 0;
	}

	return -EINVAL;
}
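
/*
 * gsc_m2m_s_selection - validate the requested rectangle with
 * gsc_try_crop(), verify the resulting scaling ratio once both source
 * and destination formats are configured, and store it as the selected
 * frame's crop rectangle.
 */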
static int gsc_m2m_s_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct v4l2_crop cr;
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	int ret;

	cr.type = s->type;
	cr.c = s->r;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	ret = gsc_try_crop(ctx, &cr);
	if (ret)
		return ret;

	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&cr.c, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &cr.c))
		return -ERANGE;

	s->r = cr.c;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check to see if scaling ratio is within supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, cr.c.width,
				cr.c.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, cr.c.width,
				cr.c.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = cr.c;

	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}
static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap		= gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_enum_fmt_vid_out_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_g_fmt_vid_cap_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= gsc_m2m_reqbufs,
	.vidioc_expbuf			= gsc_m2m_expbuf,
	.vidioc_querybuf		= gsc_m2m_querybuf,
	.vidioc_qbuf			= gsc_m2m_qbuf,
	.vidioc_dqbuf			= gsc_m2m_dqbuf,
	.vidioc_streamon		= gsc_m2m_streamon,
	.vidioc_streamoff		= gsc_m2m_streamoff,
	.vidioc_g_selection		= gsc_m2m_g_selection,
	.vidioc_s_selection		= gsc_m2m_s_selection
};
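
/*
 * queue_init - initialize the source (OUTPUT) and destination (CAPTURE)
 * vb2 queues of a mem-to-mem context; both use the dma-contig allocator
 * and copy timestamps from OUTPUT to CAPTURE buffers.
 */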
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &gsc_m2m_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &gsc_m2m_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	return vb2_queue_init(dst_vq);
}
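
/*
 * gsc_m2m_open - create a per-file-handle context: v4l2 file handle,
 * controls, default formats and the v4l2-mem2mem context with its
 * source and destination queues.
 */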
static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Setup the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
error_fh:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		 task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}
static unsigned int gsc_m2m_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&gsc->lock);

	return ret;
}
static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&gsc->lock);

	return ret;
}
static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= gsc_m2m_open,
	.release	= gsc_m2m_release,
	.poll		= gsc_m2m_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= gsc_m2m_mmap,
};
static struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run	= gsc_m2m_device_run,
	.job_abort	= gsc_m2m_job_abort,
};
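
/*
 * gsc_register_m2m_device - set up the video device node and the
 * v4l2-mem2mem device for a G-Scaler instance and register it with the
 * V4L2 core.
 */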
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops		= &gsc_m2m_fops;
	gsc->vdev.ioctl_ops	= &gsc_m2m_ioctl_ops;
	gsc->vdev.release	= video_device_release_empty;
	gsc->vdev.lock		= &gsc->lock;
	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
		 GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		ret = PTR_ERR(gsc->m2m.m2m_dev);
		goto err_m2m_r1;
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(&pdev->dev,
			"%s(): failed to register video device\n", __func__);
		goto err_m2m_r2;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_r2:
	v4l2_m2m_release(gsc->m2m.m2m_dev);
err_m2m_r1:
	video_device_release(gsc->m2m.vfd);

	return ret;
}
void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
	if (gsc)
		v4l2_m2m_release(gsc->m2m.m2m_dev);
}