/*
 * vivid-kthread-cap.h - video/vbi capture thread support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/font.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/random.h>
#include <linux/v4l2-dv-timings.h>
#include <asm/div64.h>
#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-vid-cap.h"
#include "vivid-vid-out.h"
#include "vivid-radio-common.h"
#include "vivid-radio-rx.h"
#include "vivid-radio-tx.h"
#include "vivid-sdr-cap.h"
#include "vivid-vbi-cap.h"
#include "vivid-vbi-out.h"
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"

static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_cap;
	return 0;
}
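
/*
 * Decide, for a single pixel inside the output overlay window, whether the
 * framebuffer (OSD) pixel or the looped video pixel ends up in the capture
 * line, based on the output overlay bitmap, clip rectangles, chroma-key and
 * alpha flags.
 */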
static void copy_pix(struct vivid_dev *dev, int win_y, int win_x,
			u16 *cap, const u16 *osd)
{
	u16 out;
	int left = dev->overlay_out_left;
	int top = dev->overlay_out_top;
	int fb_x = win_x + left;
	int fb_y = win_y + top;
	int i;

	out = *cap;
	*cap = *osd;

	if (dev->bitmap_out) {
		const u8 *p = dev->bitmap_out;
		unsigned stride = (dev->compose_out.width + 7) / 8;

		win_x -= dev->compose_out.left;
		win_y -= dev->compose_out.top;
		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
			return;
	}

	for (i = 0; i < dev->clipcount_out; i++) {
		struct v4l2_rect *r = &dev->clips_out[i].c;

		if (fb_y >= r->top && fb_y < r->top + r->height &&
		    fb_x >= r->left && fb_x < r->left + r->width)
			return;
	}
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
	    *osd != dev->chromakey_out)
		return;
	if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
	    out == dev->chromakey_out)
		return;
	if (dev->fmt_cap->alpha_mask) {
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) &&
		    dev->global_alpha_out)
			return;
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) &&
		    *cap & dev->fmt_cap->alpha_mask)
			return;
		if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) &&
		    !(*cap & dev->fmt_cap->alpha_mask))
			return;
	}
	*cap = out;
}
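
/*
 * Blend one line of the output overlay into the capture line by letting
 * copy_pix() pick the winning pixel (video or OSD) at each position.
 */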
static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset,
		u8 *vcapbuf, const u8 *vosdbuf,
		unsigned width, unsigned pixsize)
{
	unsigned x;

	for (x = 0; x < width; x++, vcapbuf += pixsize, vosdbuf += pixsize) {
		copy_pix(dev, y_offset, x_offset + x,
			 (u16 *)vcapbuf, (const u16 *)vosdbuf);
	}
}

static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize)
{
	/* Coarse scaling with Bresenham */
	unsigned int_part;
	unsigned fract_part;
	unsigned src_x = 0;
	unsigned error = 0;
	unsigned x;

	/*
	 * We always combine two pixels to prevent color bleed in the packed
	 * yuv case.
	 */
	srcw /= 2;
	dstw /= 2;
	int_part = srcw / dstw;
	fract_part = srcw % dstw;
	for (x = 0; x < dstw; x++, dst += twopixsize) {
		memcpy(dst, src + src_x * twopixsize, twopixsize);

		src_x += int_part;
		error += fract_part;
		if (error >= dstw) {
			error -= dstw;
			src_x++;
		}
	}
}
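
/*
 * Worked example of the Bresenham stepping above (illustrative numbers,
 * not taken from the driver): scaling a 720-pixel line down to 480 pixels
 * operates on pixel pairs, so srcw = 360 and dstw = 240 after the division
 * by two. Then int_part = 1 and fract_part = 120: src_x advances by one
 * pair per output pair, plus one extra pair every time error wraps at 240,
 * so source pairs 0, 1, 3, 4, 6, 7, ... are copied, as expected for a 2/3
 * downscale.
 */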

/*
 * Precalculate the rectangles needed to perform video looping:
 *
 * The nominal pipeline is that the video output buffer is cropped by
 * crop_out, scaled to compose_out, overlaid with the output overlay,
 * cropped on the capture side by crop_cap and scaled again to the video
 * capture buffer using compose_cap.
 *
 * To keep things efficient we calculate the intersection of compose_out
 * and crop_cap (since that's the only part of the video that will
 * actually end up in the capture buffer), determine which part of the
 * video output buffer that is and which part of the video capture buffer
 * so we can scale the video straight from the output buffer to the capture
 * buffer without any intermediate steps.
 *
 * If we need to deal with an output overlay, then there is no choice and
 * that intermediate step still has to be taken. For the output overlay
 * support we calculate the intersection of the framebuffer and the overlay
 * window (which may be partially or wholly outside of the framebuffer
 * itself) and the intersection of that with loop_vid_copy (i.e. the part of
 * the actual looped video that will be overlaid). The result is calculated
 * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates
 * (loop_vid_overlay). Finally calculate the part of the capture buffer that
 * will receive that overlaid video.
 */
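/*
 * As a concrete illustration of the pipeline above (made-up numbers): with
 * compose_out = 1920x1080@0x0 and crop_cap = 960x540@480x270, loop_vid_copy
 * becomes their intersection, 960x540@480x270. That rectangle is then mapped
 * back through the compose_out -> crop_out scaling to obtain loop_vid_out,
 * and forward through the crop_cap -> compose_cap scaling to obtain
 * loop_vid_cap.
 */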
static void vivid_precalc_copy_rects(struct vivid_dev *dev)
{
	/* Framebuffer rectangle */
	struct v4l2_rect r_fb = {
		0, 0, dev->display_width, dev->display_height
	};
	/* Overlay window rectangle in framebuffer coordinates */
	struct v4l2_rect r_overlay = {
		dev->overlay_out_left, dev->overlay_out_top,
		dev->compose_out.width, dev->compose_out.height
	};

	v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &dev->compose_out);

	dev->loop_vid_out = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
	dev->loop_vid_out.left += dev->crop_out.left;
	dev->loop_vid_out.top += dev->crop_out.top;

	dev->loop_vid_cap = dev->loop_vid_copy;
	v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
		dev->loop_vid_copy.width, dev->loop_vid_copy.height,
		dev->loop_vid_copy.left, dev->loop_vid_copy.top,
		dev->loop_vid_out.width, dev->loop_vid_out.height,
		dev->loop_vid_out.left, dev->loop_vid_out.top,
		dev->loop_vid_cap.width, dev->loop_vid_cap.height,
		dev->loop_vid_cap.left, dev->loop_vid_cap.top);

	v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);

	/* shift r_overlay to the same origin as compose_out */
	r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
	r_overlay.top += dev->compose_out.top - dev->overlay_out_top;

	v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
	dev->loop_fb_copy = dev->loop_vid_overlay;

	/* shift dev->loop_fb_copy back again to the fb origin */
	dev->loop_fb_copy.left -= dev->compose_out.left - dev->overlay_out_left;
	dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;

	dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
	v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);

	dprintk(dev, 1,
		"loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
		dev->loop_fb_copy.width, dev->loop_fb_copy.height,
		dev->loop_fb_copy.left, dev->loop_fb_copy.top,
		dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
		dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
		dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
		dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
}
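
/*
 * Return the virtual address of plane p of a buffer. For formats where all
 * planes are stored in a single buffer the planes are stacked one after the
 * other, so the sizes of the preceding planes have to be skipped.
 */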
static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
			 unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h)
{
	unsigned i;
	void *vbuf;

	if (p == 0 || tpg_g_buffers(tpg) > 1)
		return vb2_plane_vaddr(&buf->vb.vb2_buf, p);
	vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	for (i = 0; i < p; i++)
		vbuf += bpl[i] * h / tpg->vdownsampling[i];
	return vbuf;
}
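
/*
 * Copy plane p of the current video output buffer into the capture buffer,
 * cropping, scaling and (when configured) blending the output overlay along
 * the way, using the rectangles precalculated by vivid_precalc_copy_rects().
 * Returns non-zero if there is no output buffer to loop from, in which case
 * the caller falls back to the test pattern generator.
 */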
static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
		struct vivid_buffer *vid_cap_buf)
{
	bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
	struct tpg_data *tpg = &dev->tpg;
	struct vivid_buffer *vid_out_buf = NULL;
	unsigned vdiv = dev->fmt_out->vdownsampling[p];
	unsigned twopixsize = tpg_g_twopixelsize(tpg, p);
	unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width);
	unsigned img_height = dev->compose_cap.height;
	unsigned stride_cap = tpg->bytesperline[p];
	unsigned stride_out = dev->bytesperline_out[p];
	unsigned stride_osd = dev->display_byte_stride;
	unsigned hmax = (img_height * tpg->perc_fill) / 100;
	u8 *voutbuf;
	u8 *vosdbuf = NULL;
	unsigned y;
	bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
	/* Coarse scaling with Bresenham */
	unsigned vid_out_int_part;
	unsigned vid_out_fract_part;
	unsigned vid_out_y = 0;
	unsigned vid_out_error = 0;
	unsigned vid_overlay_int_part = 0;
	unsigned vid_overlay_fract_part = 0;
	unsigned vid_overlay_y = 0;
	unsigned vid_overlay_error = 0;
	unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left);
	unsigned vid_cap_right;
	bool quick;

	vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
	vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;

	if (!list_empty(&dev->vid_out_active))
		vid_out_buf = list_entry(dev->vid_out_active.next,
					 struct vivid_buffer, list);
	if (vid_out_buf == NULL)
		return -ENODATA;

	vid_cap_buf->vb.field = vid_out_buf->vb.field;

	voutbuf = plane_vaddr(tpg, vid_out_buf, p,
			      dev->bytesperline_out, dev->fmt_out_rect.height);
	if (p < dev->fmt_out->buffers)
		voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset;
	voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) +
		(dev->loop_vid_out.top / vdiv) * stride_out;
	vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) +
		(dev->compose_cap.top / vdiv) * stride_cap;

	if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
		/*
		 * If there is nothing to copy, then just fill the capture window
		 * with black.
		 */
		for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap)
			memcpy(vcapbuf, tpg->black_line[p], img_width);
		return 0;
	}

	if (dev->overlay_out_enabled &&
	    dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
		vosdbuf = dev->video_vbase;
		vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 +
			   dev->loop_fb_copy.top * stride_osd;
		vid_overlay_int_part = dev->loop_vid_overlay.height /
				       dev->loop_vid_overlay_cap.height;
		vid_overlay_fract_part = dev->loop_vid_overlay.height %
					 dev->loop_vid_overlay_cap.height;
	}

	vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width);
	/* quick is true if no video scaling is needed */
	quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;

	dev->cur_scaled_line = dev->loop_vid_out.height;
	for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) {
		/* osdline is true if this line requires overlay blending */
		bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
			y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;

		/*
		 * If this line of the capture buffer doesn't get any video, then
		 * just fill with black.
		 */
		if (y < dev->loop_vid_cap.top ||
		    y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
			memcpy(vcapbuf, tpg->black_line[p], img_width);
			continue;
		}

		/* fill the left border with black */
		if (dev->loop_vid_cap.left)
			memcpy(vcapbuf, tpg->black_line[p], vid_cap_left);

		/* fill the right border with black */
		if (vid_cap_right < img_width)
			memcpy(vcapbuf + vid_cap_right, tpg->black_line[p],
			       img_width - vid_cap_right);

		if (quick && !osdline) {
			memcpy(vcapbuf + vid_cap_left,
			       voutbuf + vid_out_y * stride_out,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		if (dev->cur_scaled_line == vid_out_y) {
			memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
			       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));
			goto update_vid_out_y;
		}
		if (!osdline) {
			scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
				tpg_hdiv(tpg, p, dev->loop_vid_out.width),
				tpg_hdiv(tpg, p, dev->loop_vid_cap.width),
				tpg_g_twopixelsize(tpg, p));
		} else {
			/*
			 * Offset in bytes within loop_vid_copy to the start of the
			 * loop_vid_overlay rectangle.
			 */
			unsigned offset =
				((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) *
				 twopixsize) / 2;
			u8 *osd = vosdbuf + vid_overlay_y * stride_osd;

			scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
				dev->loop_vid_out.width, dev->loop_vid_copy.width,
				tpg_g_twopixelsize(tpg, p));
			if (blend)
				blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
					   dev->loop_vid_overlay.left,
					   dev->blended_line + offset, osd,
					   dev->loop_vid_overlay.width, twopixsize / 2);
			else
				memcpy(dev->blended_line + offset,
				       osd, (dev->loop_vid_overlay.width * twopixsize) / 2);
			scale_line(dev->blended_line, dev->scaled_line,
				   dev->loop_vid_copy.width, dev->loop_vid_cap.width,
				   tpg_g_twopixelsize(tpg, p));
		}
		dev->cur_scaled_line = vid_out_y;
		memcpy(vcapbuf + vid_cap_left, dev->scaled_line,
		       tpg_hdiv(tpg, p, dev->loop_vid_cap.width));

update_vid_out_y:
		if (osdline) {
			vid_overlay_y += vid_overlay_int_part;
			vid_overlay_error += vid_overlay_fract_part;
			if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
				vid_overlay_error -= dev->loop_vid_overlay_cap.height;
				vid_overlay_y++;
			}
		}
		vid_out_y += vid_out_int_part;
		vid_out_error += vid_out_fract_part;
		if (vid_out_error >= dev->loop_vid_cap.height / vdiv) {
			vid_out_error -= dev->loop_vid_cap.height / vdiv;
			vid_out_y++;
		}
	}

	if (!blank)
		return 0;
	for (; y < img_height; y += vdiv, vcapbuf += stride_cap)
		memcpy(vcapbuf, tpg->contrast_line[p], img_width);
	return 0;
}
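
/*
 * Fill one capture buffer: loop the video output (including the output
 * overlay) into it when looping is active, otherwise render the test
 * pattern, then draw the on-screen text lines (time, resolution, control
 * values, ...) and set the buffer field and timestamp.
 */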
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct tpg_data *tpg = &dev->tpg;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	unsigned line_height = 16 / factor;
	bool is_tv = vivid_is_sdtv_cap(dev);
	bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
	unsigned p;
	int line = 1;
	int gain;
	u8 *basep[TPG_MAX_PLANES][2];
	unsigned ms;
	char str[100];
	bool is_loop = false;

	if (dev->loop_video && dev->can_loop_video &&
	    ((vivid_is_svid_cap(dev) &&
	      !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
	     (vivid_is_hdmi_cap(dev) &&
	      !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
		is_loop = true;

	buf->vb.sequence = dev->vid_cap_seq_count;
	/*
	 * Take the timestamp now if the timestamp source is set to
	 * "Start of Exposure".
	 */
	if (dev->tstamp_src_is_soe)
		buf->vb.vb2_buf.timestamp = ktime_get_ns();
	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * 60 Hz standards start with the bottom field, 50 Hz standards
		 * with the top field. So if the 0-based seq_count is even,
		 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
		 * standards.
		 */
		buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
			V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
		/*
		 * The sequence counter counts frames, not fields. So divide
		 * by two.
		 */
		buf->vb.sequence /= 2;
	} else {
		buf->vb.field = dev->field_cap;
	}
	tpg_s_field(tpg, buf->vb.field,
		    dev->field_cap == V4L2_FIELD_ALTERNATE);
	tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]);

	vivid_precalc_copy_rects(dev);

	for (p = 0; p < tpg_g_planes(tpg); p++) {
		void *vbuf = plane_vaddr(tpg, buf, p,
					 tpg->bytesperline, tpg->buf_height);

		/*
		 * The first plane of a multiplanar format has a non-zero
		 * data_offset. This helps testing whether the application
		 * correctly supports non-zero data offsets.
		 */
		if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) {
			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
			       dev->fmt_cap->data_offset[p]);
			vbuf += dev->fmt_cap->data_offset[p];
		}
		tpg_calc_text_basep(tpg, basep, p, vbuf);
		if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
			tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev),
					      p, vbuf);
	}
	dev->must_blank[buf->vb.vb2_buf.index] = false;

	/* Update the stream time, but only at the start of a new frame. */
	if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
	    (buf->vb.sequence & 1) == 0)
		dev->ms_vid_cap =
			jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);

	ms = dev->ms_vid_cap;
	if (dev->osd_mode <= 1) {
		snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
			 (ms / (60 * 60 * 1000)) % 24,
			 (ms / (60 * 1000)) % 60,
			 (ms / 1000) % 60,
			 ms % 1000,
			 buf->vb.sequence,
			 (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
				(buf->vb.field == V4L2_FIELD_TOP ?
				 " top" : " bottom") : "");
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
	}
	if (dev->osd_mode == 0) {
		snprintf(str, sizeof(str), " %dx%d, input %d ",
			 dev->src_rect.width, dev->src_rect.height, dev->input);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);

		gain = v4l2_ctrl_g_ctrl(dev->gain);
		mutex_lock(dev->ctrl_hdl_user_vid.lock);
		snprintf(str, sizeof(str),
			 " brightness %3d, contrast %3d, saturation %3d, hue %d ",
			 dev->brightness->cur.val,
			 dev->contrast->cur.val,
			 dev->saturation->cur.val,
			 dev->hue->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str),
			 " autogain %d, gain %3d, alpha 0x%02x ",
			 dev->autogain->cur.val, gain, dev->alpha->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_vid.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_aud.lock);
		snprintf(str, sizeof(str),
			 " volume %3d, mute %d ",
			 dev->volume->cur.val, dev->mute->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_aud.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_gen.lock);
		snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
			 dev->int32->cur.val,
			 *dev->int64->p_cur.p_s64,
			 dev->bitmask->cur.val);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
			 dev->boolean->cur.val,
			 dev->menu->qmenu[dev->menu->cur.val],
			 dev->string->p_cur.p_char);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
			 dev->int_menu->qmenu_int[dev->int_menu->cur.val],
			 dev->int_menu->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_gen.lock);
		tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		if (dev->button_pressed) {
			dev->button_pressed--;
			snprintf(str, sizeof(str), " button pressed!");
			tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
		}
		if (dev->osd[0]) {
			if (vivid_is_hdmi_cap(dev)) {
				snprintf(str, sizeof(str),
					 " OSD \"%s\"", dev->osd);
				tpg_gen_text(tpg, basep, line++ * line_height,
					     16, str);
			}
			if (dev->osd_jiffies &&
			    time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) {
				dev->osd[0] = 0;
				dev->osd_jiffies = 0;
			}
		}
	}

	/*
	 * If "End of Frame" is specified as the timestamp source, then take
	 * the timestamp now.
	 */
	if (!dev->tstamp_src_is_soe)
		buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.vb2_buf.timestamp += dev->time_wrap_offset;
}

/*
 * Return true if this pixel coordinate is a valid video pixel.
 */
static bool valid_pix(struct vivid_dev *dev, int win_y, int win_x, int fb_y, int fb_x)
{
	int i;

	if (dev->bitmap_cap) {
		/*
		 * Only if the corresponding bit in the bitmap is set can
		 * the video pixel be shown. Coordinates are relative to
		 * the overlay window set by VIDIOC_S_FMT.
		 */
		const u8 *p = dev->bitmap_cap;
		unsigned stride = (dev->compose_cap.width + 7) / 8;

		if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
			return false;
	}

	for (i = 0; i < dev->clipcount_cap; i++) {
		/*
		 * Only if the framebuffer coordinate is not in any of the
		 * clip rectangles will the video pixel be shown.
		 */
		struct v4l2_rect *r = &dev->clips_cap[i].c;

		if (fb_y >= r->top && fb_y < r->top + r->height &&
		    fb_x >= r->left && fb_x < r->left + r->width)
			return false;
	}
	return true;
}

/*
 * Draw the image into the overlay buffer.
 * Note that the combination of overlay and multiplanar is not supported.
 */
static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	struct tpg_data *tpg = &dev->tpg;
	unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
	void *vbase = dev->fb_vbase_cap;
	void *vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	unsigned img_width = dev->compose_cap.width;
	unsigned img_height = dev->compose_cap.height;
	unsigned stride = tpg->bytesperline[0];
	/* if quick is true, then valid_pix() doesn't have to be called */
	bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
	int x, y, w, out_x = 0;

	/*
	 * Overlay is only supported for formats that have a twopixelsize
	 * that's >= 2. Warn and bail out if that's not the case.
	 */
	if (WARN_ON(pixsize == 0))
		return;
	if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
	     dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
	    dev->overlay_cap_field != buf->vb.field)
		return;

	vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
	x = dev->overlay_cap_left;
	w = img_width;
	if (x < 0) {
		out_x = -x;
		w = w + x;
		x = 0;
	}
	if (w > (int)dev->fb_cap.fmt.width - x)
		w = dev->fb_cap.fmt.width - x;
	if (w <= 0)
		return;
	if (dev->overlay_cap_top >= 0)
		vbase += dev->overlay_cap_top * dev->fb_cap.fmt.bytesperline;
	for (y = dev->overlay_cap_top;
	     y < dev->overlay_cap_top + (int)img_height;
	     y++, vbuf += stride) {
		int px;

		if (y < 0 || y > dev->fb_cap.fmt.height)
			continue;
		if (quick) {
			memcpy(vbase + x * pixsize,
			       vbuf + out_x * pixsize, w * pixsize);
			vbase += dev->fb_cap.fmt.bytesperline;
			continue;
		}
		for (px = 0; px < w; px++) {
			if (!valid_pix(dev, y - dev->overlay_cap_top,
				       px + out_x, y, px + x))
				continue;
			memcpy(vbase + (px + x) * pixsize,
			       vbuf + (px + out_x) * pixsize,
			       pixsize);
		}
		vbase += dev->fb_cap.fmt.bytesperline;
	}
}
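
/*
 * One tick of the capture thread: pick up the next queued video and VBI
 * capture buffers (if any), fill them, optionally draw the capture overlay
 * into the framebuffer, mark the buffers done and update the test pattern
 * movement counters.
 */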
static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
{
	struct vivid_buffer *vid_cap_buf = NULL;
	struct vivid_buffer *vbi_cap_buf = NULL;

	dprintk(dev, 1, "Video Capture Thread Tick\n");

	while (dropped_bufs-- > 1)
		tpg_update_mv_count(&dev->tpg,
				dev->field_cap == V4L2_FIELD_NONE ||
				dev->field_cap == V4L2_FIELD_ALTERNATE);

	/* Drop a certain percentage of buffers. */
	if (dev->perc_dropped_buffers &&
	    prandom_u32_max(100) < dev->perc_dropped_buffers)
		return;

	spin_lock(&dev->slock);
	if (!list_empty(&dev->vid_cap_active)) {
		vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
		list_del(&vid_cap_buf->list);
	}
	if (!list_empty(&dev->vbi_cap_active)) {
		if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
		    (dev->vbi_cap_seq_count & 1)) {
			vbi_cap_buf = list_entry(dev->vbi_cap_active.next,
						 struct vivid_buffer, list);
			list_del(&vbi_cap_buf->list);
		}
	}
	spin_unlock(&dev->slock);

	if (!vid_cap_buf && !vbi_cap_buf)
		goto update_mv;

	if (vid_cap_buf) {
		vivid_fillbuff(dev, vid_cap_buf);
		dprintk(dev, 1, "filled buffer %d\n",
			vid_cap_buf->vb.vb2_buf.index);

		if (dev->overlay_cap_owner && dev->fb_cap.base &&
		    dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
			vivid_overlay(dev, vid_cap_buf);

		vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vid_cap buffer %d done\n",
			vid_cap_buf->vb.vb2_buf.index);
	}

	if (vbi_cap_buf) {
		if (dev->stream_sliced_vbi_cap)
			vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
		else
			vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
		vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		dprintk(dev, 2, "vbi_cap %d done\n",
			vbi_cap_buf->vb.vb2_buf.index);
	}
	dev->dqbuf_error = false;

update_mv:
	/* Update the test pattern movement counters */
	tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE ||
				       dev->field_cap == V4L2_FIELD_ALTERNATE);
}
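
/*
 * The video capture thread: each iteration works out, from the elapsed
 * jiffies and the configured timeperframe, how many frames should have been
 * produced by now, handles the pending frame (and any dropped ones) via
 * vivid_thread_vid_cap_tick() and then sleeps until the next frame is due.
 */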
static int vivid_thread_vid_cap(void *data)
{
	struct vivid_dev *dev = data;
	u64 numerators_since_start;
	u64 buffers_since_start;
	u64 next_jiffies_since_start;
	unsigned long jiffies_since_start;
	unsigned long cur_jiffies;
	unsigned wait_jiffies;
	unsigned numerator;
	unsigned denominator;
	int dropped_bufs;

	dprintk(dev, 1, "Video Capture Thread Start\n");

	set_freezable();

	/* Resets frame counters */
	dev->cap_seq_offset = 0;
	dev->cap_seq_count = 0;
	dev->cap_seq_resync = false;
	dev->jiffies_vid_cap = jiffies;

	for (;;) {
		try_to_freeze();
		if (kthread_should_stop())
			break;

		mutex_lock(&dev->mutex);
		cur_jiffies = jiffies;
		if (dev->cap_seq_resync) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = dev->cap_seq_count + 1;
			dev->cap_seq_count = 0;
			dev->cap_seq_resync = false;
		}
		numerator = dev->timeperframe_vid_cap.numerator;
		denominator = dev->timeperframe_vid_cap.denominator;

		if (dev->field_cap == V4L2_FIELD_ALTERNATE)
			denominator *= 2;

		/* Calculate the number of jiffies since we started streaming */
		jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap;
		/* Get the number of buffers streamed since the start */
		buffers_since_start = (u64)jiffies_since_start * denominator +
				      (HZ * numerator) / 2;
		do_div(buffers_since_start, HZ * numerator);
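
		/*
		 * Worked example (illustrative numbers, assuming HZ = 1000):
		 * at 30 fps the timeperframe is 1/30, so after 10 seconds
		 * jiffies_since_start = 10000 and buffers_since_start =
		 * (10000 * 30 + (1000 * 1) / 2) / (1000 * 1) = 300 frames.
		 */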

		/*
		 * After more than 0xf0000000 (rounded down to a multiple of
		 * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
		 * jiffies have passed since we started streaming, reset the
		 * counters and keep track of the sequence offset.
		 */
		if (jiffies_since_start > JIFFIES_RESYNC) {
			dev->jiffies_vid_cap = cur_jiffies;
			dev->cap_seq_offset = buffers_since_start;
			buffers_since_start = 0;
		}
		dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count;
		dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
		dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
		dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;

		vivid_thread_vid_cap_tick(dev, dropped_bufs);

		/*
		 * Calculate the number of 'numerators' streamed since we started,
		 * including the current buffer.
		 */
		numerators_since_start = ++buffers_since_start * numerator;

		/* And the number of jiffies since we started */
		jiffies_since_start = jiffies - dev->jiffies_vid_cap;

		mutex_unlock(&dev->mutex);

		/*
		 * Calculate when that next buffer is supposed to start
		 * in jiffies since we started streaming.
		 */
		next_jiffies_since_start = numerators_since_start * HZ +
					   denominator / 2;
		do_div(next_jiffies_since_start, denominator);
		/* If it is in the past, then just schedule asap */
		if (next_jiffies_since_start < jiffies_since_start)
			next_jiffies_since_start = jiffies_since_start;

		wait_jiffies = next_jiffies_since_start - jiffies_since_start;
		schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
	}
	dprintk(dev, 1, "Video Capture Thread End\n");
	return 0;
}
static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
{
	v4l2_ctrl_grab(dev->ctrl_has_crop_cap, grab);
	v4l2_ctrl_grab(dev->ctrl_has_compose_cap, grab);
	v4l2_ctrl_grab(dev->ctrl_has_scaler_cap, grab);
}
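
/*
 * Start capture generation: if the kthread is already running (the other
 * capture stream started first), only record the sequence counter offset
 * for the stream that is joining; otherwise reset the counters and start
 * the capture kthread.
 */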
int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
{
	dprintk(dev, 1, "%s\n", __func__);

	if (dev->kthread_vid_cap) {
		u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128;

		if (pstreaming == &dev->vid_cap_streaming)
			dev->vid_cap_seq_start = seq_count;
		else
			dev->vbi_cap_seq_start = seq_count;
		*pstreaming = true;
		return 0;
	}

	/* Resets frame counters */
	tpg_init_mv_count(&dev->tpg);

	dev->vid_cap_seq_start = dev->seq_wrap * 128;
	dev->vbi_cap_seq_start = dev->seq_wrap * 128;

	dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
			"%s-vid-cap", dev->v4l2_dev.name);

	if (IS_ERR(dev->kthread_vid_cap)) {
		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
		return PTR_ERR(dev->kthread_vid_cap);
	}
	*pstreaming = true;
	vivid_grab_controls(dev, true);

	dprintk(dev, 1, "returning from %s\n", __func__);
	return 0;
}
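
/*
 * Stop capture generation for one stream: return its still-queued buffers
 * with an error status, and once neither video nor VBI capture is streaming
 * any more, release the grabbed controls and stop the kthread.
 */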
void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
{
	dprintk(dev, 1, "%s\n", __func__);

	if (dev->kthread_vid_cap == NULL)
		return;

	*pstreaming = false;
	if (pstreaming == &dev->vid_cap_streaming) {
		/* Release all active buffers */
		while (!list_empty(&dev->vid_cap_active)) {
			struct vivid_buffer *buf;

			buf = list_entry(dev->vid_cap_active.next,
					 struct vivid_buffer, list);
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dprintk(dev, 2, "vid_cap buffer %d done\n",
				buf->vb.vb2_buf.index);
		}
	}

	if (pstreaming == &dev->vbi_cap_streaming) {
		while (!list_empty(&dev->vbi_cap_active)) {
			struct vivid_buffer *buf;

			buf = list_entry(dev->vbi_cap_active.next,
					 struct vivid_buffer, list);
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dprintk(dev, 2, "vbi_cap buffer %d done\n",
				buf->vb.vb2_buf.index);
		}
	}

	if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
		return;

	/* shutdown control thread */
	vivid_grab_controls(dev, false);
	mutex_unlock(&dev->mutex);
	kthread_stop(dev->kthread_vid_cap);
	dev->kthread_vid_cap = NULL;
	mutex_lock(&dev->mutex);
}