/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */
#define pr_fmt(fmt) "s5p-tv (mixer): " fmt

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}
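/*
 * Find the driver registered under module_name on the platform bus, take
 * the first device bound to it and register its subdev with the mixer's
 * v4l2_device.  Returns the subdev, or NULL when nothing usable is found.
 */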
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (drv == NULL) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	return sd;
}
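/*
 * Acquire V4L2 resources for the mixer: register the v4l2_device, create
 * the vb2 DMA-contig allocator context and register every configured
 * output (up to MXR_MAX_OUTPUTS).
 */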
int mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		ret = PTR_ERR(mdev->alloc_ctx);
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof(*out), GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof(mdev->output));

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregister all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}
void mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}
static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}
static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}
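/*
 * Reset layer geometry to the current output resolution reported by the
 * output subdev; source and destination both cover the full screen.
 */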
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof(layer->geo));

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}
static void mxr_layer_update_output(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	/* checking if update is needed */
	if (layer->geo.dst.full_width == mbus_fmt.width &&
	    layer->geo.dst.full_height == mbus_fmt.height)
		return;

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

	mxr_geometry_dump(mdev, &layer->geo);
}
static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);
static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}
static unsigned int divup(unsigned int divident, unsigned int divisor)
{
	return (divident + divisor - 1) / divisor;
}
unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}
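/*
 * Fill per-plane sizeimage/bytesperline for the given format and
 * resolution; sizes are computed in the block units described by
 * struct mxr_block.
 */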
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}
static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}
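/*
 * Selection API: CROP targets operate on the source (buffer) rectangle,
 * COMPOSE targets on the destination (screen) rectangle.
 */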
static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left)
		return 0;
	if (a->top < b->top)
		return 0;
	if (a->left + a->width > b->left + b->width)
		return 0;
	if (a->top + a->height > b->top + b->height)
		return 0;
	return 1;
}
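/*
 * Apply a selection rectangle; the layer's fix_geometry callback may adjust
 * it to hardware constraints, and V4L2_SEL_FLAG_LE/GE decide whether the
 * adjusted result is acceptable.
 */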
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;
	struct v4l2_rect res;

	memset(&res, 0, sizeof(res));

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}
	/* apply change and update geometry if needed */
	if (target) {
		/* backup current geometry if setup fails */
		memcpy(&tmp, geo, sizeof(tmp));

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve update selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;

fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof(tmp));
	return -ERANGE;
}
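/*
 * DV timings and TV standard ioctls are forwarded to the currently selected
 * output subdev; mdev->mutex protects sd_out from changing underneath the
 * call.
 */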
static int mxr_enum_dv_timings(struct file *file, void *fh,
	struct v4l2_enum_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_timings, timings);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
static int mxr_s_dv_timings(struct file *file, void *fh,
	struct v4l2_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* timings change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}
static int mxr_g_dv_timings(struct file *file, void *fh,
	struct v4l2_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
static int mxr_dv_timings_cap(struct file *file, void *fh,
	struct v4l2_dv_timings_cap *cap)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, dv_timings_cap, cap);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}
static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}
static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_timings)
		a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}
static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update layers geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}
static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}
static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}
static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}
static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}
static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
static int mxr_expbuf(struct file *file, void *priv,
	struct v4l2_exportbuffer *eb)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_expbuf(&layer->vb_queue, eb);
}
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}
static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}
static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	.vidioc_expbuf = mxr_expbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV Timings functions */
	.vidioc_enum_dv_timings = mxr_enum_dv_timings,
	.vidioc_s_dv_timings = mxr_s_dv_timings,
	.vidioc_g_dv_timings = mxr_g_dv_timings,
	.vidioc_dv_timings_cap = mxr_dv_timings_cap,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);

	return ret;
}
static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);
	unsigned int res;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	mutex_lock(&layer->mutex);
	res = vb2_poll(&layer->vb_queue, file, wait);
	mutex_unlock(&layer->mutex);

	return res;
}
static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);
	int ret;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	ret = vb2_mmap(&layer->vb_queue, vma);
	mutex_unlock(&layer->mutex);

	return ret;
}
static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	mutex_lock(&layer->mutex);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	mutex_unlock(&layer->mutex);

	return 0;
}
static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};
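/*
 * videobuf2 queue operations.  queue_setup derives the number of planes and
 * their sizes from the currently set format and source geometry.
 */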
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	return 0;
}
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}
static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}
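/*
 * Watchdog used while stopping the stream: if the hardware does not return
 * the remaining buffers within the timeout, force them to the ERROR state
 * so vb2_wait_for_all_buffers() can finish.
 */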
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffers to be done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers go to the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);

	return 0;
}
static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};
/* FIXME: try to put this function into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);

	return ret;
}
void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}
void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
static void mxr_vfd_release(struct video_device *vdev)
{
	pr_info("video device release\n");
}
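/*
 * Allocate and initialize a base layer: video_device, vb2 queue and locking.
 * The caller registers it with mxr_base_layer_register().
 */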
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof(*layer), GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.vfl_dir = VFL_DIR_TX,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}
static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}