1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * A generic video device interface for the LINUX operating system
6 * using a set of device structures/vectors for low level operations.
8 * This file replaces the videodev.c file that comes with the
9 * regular kernel distribution.
11 * Author: Bill Dirks <bill@thedirks.org>
12 * based on code by Alan Cox, <alan@cymru.net>
16 * Video capture interface for Linux
18 * A generic video device interface for the LINUX operating system
19 * using a set of device structures/vectors for low level operations.
21 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
27 * Video4linux 1/2 integration by Justin Schoeman
28 * <justin@suntiger.ee.up.ac.za>
29 * 2.4 PROCFS support ported from 2.4 kernels by
30 * Iñaki García Etxebarria <garetxe@euskalnet.net>
31 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
32 * 2.4 devfs support ported from 2.4 kernels by
33 * Dan Merillat <dan@merillat.org>
34 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
41 #include <linux/string.h>
42 #include <linux/errno.h>
43 #include <linux/uaccess.h>
45 #include <asm/div64.h>
46 #include <media/v4l2-common.h>
47 #include <media/v4l2-device.h>
48 #include <media/v4l2-ctrls.h>
50 #include <linux/videodev2.h>
54 * V 4 L 2 D R I V E R H E L P E R A P I
59 * Video Standard Operations (contributed by Michael Schimek)
62 /* Helper functions for control handling */
64 /* Fill in a struct v4l2_queryctrl */
65 int v4l2_ctrl_query_fill(struct v4l2_queryctrl
*qctrl
, s32 _min
, s32 _max
, s32 _step
, s32 _def
)
73 v4l2_ctrl_fill(qctrl
->id
, &name
, &qctrl
->type
,
74 &min
, &max
, &step
, &def
, &qctrl
->flags
);
82 qctrl
->default_value
= def
;
83 qctrl
->reserved
[0] = qctrl
->reserved
[1] = 0;
84 strscpy(qctrl
->name
, name
, sizeof(qctrl
->name
));
87 EXPORT_SYMBOL(v4l2_ctrl_query_fill
);
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
 * and max don't have to be aligned, but there must be at least one valid
 * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;
	return x;
}
/* Clamp x to [min, max], then round it up to a multiple of alignment
 * (no rounding when alignment is zero). */
static unsigned int clamp_roundup(unsigned int x, unsigned int min,
				  unsigned int max, unsigned int alignment)
{
	x = clamp(x, min, max);
	if (alignment)
		x = round_up(x, alignment);

	return x;
}
119 void v4l_bound_align_image(u32
*w
, unsigned int wmin
, unsigned int wmax
,
121 u32
*h
, unsigned int hmin
, unsigned int hmax
,
122 unsigned int halign
, unsigned int salign
)
124 *w
= clamp_align(*w
, wmin
, wmax
, walign
);
125 *h
= clamp_align(*h
, hmin
, hmax
, halign
);
127 /* Usually we don't need to align the size and are done now. */
131 /* How much alignment do we have? */
134 /* Enough to satisfy the image alignment? */
135 if (walign
+ halign
< salign
) {
136 /* Max walign where there is still a valid width */
137 unsigned int wmaxa
= __fls(wmax
^ (wmin
- 1));
138 /* Max halign where there is still a valid height */
139 unsigned int hmaxa
= __fls(hmax
^ (hmin
- 1));
141 /* up the smaller alignment until we have enough */
143 if (halign
>= hmaxa
||
144 (walign
<= halign
&& walign
< wmaxa
)) {
145 *w
= clamp_align(*w
, wmin
, wmax
, walign
+ 1);
148 *h
= clamp_align(*h
, hmin
, hmax
, halign
+ 1);
151 } while (halign
+ walign
< salign
);
154 EXPORT_SYMBOL_GPL(v4l_bound_align_image
);
157 __v4l2_find_nearest_size(const void *array
, size_t array_size
,
158 size_t entry_size
, size_t width_offset
,
159 size_t height_offset
, s32 width
, s32 height
)
161 u32 error
, min_error
= U32_MAX
;
162 const void *best
= NULL
;
168 for (i
= 0; i
< array_size
; i
++, array
+= entry_size
) {
169 const u32
*entry_width
= array
+ width_offset
;
170 const u32
*entry_height
= array
+ height_offset
;
172 error
= abs(*entry_width
- width
) + abs(*entry_height
- height
);
173 if (error
> min_error
)
184 EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size
);
186 int v4l2_g_parm_cap(struct video_device
*vdev
,
187 struct v4l2_subdev
*sd
, struct v4l2_streamparm
*a
)
189 struct v4l2_subdev_frame_interval ival
= { 0 };
192 if (a
->type
!= V4L2_BUF_TYPE_VIDEO_CAPTURE
&&
193 a
->type
!= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
)
196 if (vdev
->device_caps
& V4L2_CAP_READWRITE
)
197 a
->parm
.capture
.readbuffers
= 2;
198 if (v4l2_subdev_has_op(sd
, pad
, get_frame_interval
))
199 a
->parm
.capture
.capability
= V4L2_CAP_TIMEPERFRAME
;
200 ret
= v4l2_subdev_call_state_active(sd
, pad
, get_frame_interval
, &ival
);
202 a
->parm
.capture
.timeperframe
= ival
.interval
;
205 EXPORT_SYMBOL_GPL(v4l2_g_parm_cap
);
207 int v4l2_s_parm_cap(struct video_device
*vdev
,
208 struct v4l2_subdev
*sd
, struct v4l2_streamparm
*a
)
210 struct v4l2_subdev_frame_interval ival
= {
211 .interval
= a
->parm
.capture
.timeperframe
215 if (a
->type
!= V4L2_BUF_TYPE_VIDEO_CAPTURE
&&
216 a
->type
!= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
)
219 memset(&a
->parm
, 0, sizeof(a
->parm
));
220 if (vdev
->device_caps
& V4L2_CAP_READWRITE
)
221 a
->parm
.capture
.readbuffers
= 2;
223 a
->parm
.capture
.readbuffers
= 0;
225 if (v4l2_subdev_has_op(sd
, pad
, get_frame_interval
))
226 a
->parm
.capture
.capability
= V4L2_CAP_TIMEPERFRAME
;
227 ret
= v4l2_subdev_call_state_active(sd
, pad
, set_frame_interval
, &ival
);
229 a
->parm
.capture
.timeperframe
= ival
.interval
;
232 EXPORT_SYMBOL_GPL(v4l2_s_parm_cap
);
234 const struct v4l2_format_info
*v4l2_format_info(u32 format
)
236 static const struct v4l2_format_info formats
[] = {
238 { .format
= V4L2_PIX_FMT_BGR24
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 3, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
239 { .format
= V4L2_PIX_FMT_RGB24
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 3, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
240 { .format
= V4L2_PIX_FMT_HSV24
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 3, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
241 { .format
= V4L2_PIX_FMT_BGR32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
242 { .format
= V4L2_PIX_FMT_XBGR32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
243 { .format
= V4L2_PIX_FMT_BGRX32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
244 { .format
= V4L2_PIX_FMT_RGB32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
245 { .format
= V4L2_PIX_FMT_XRGB32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
246 { .format
= V4L2_PIX_FMT_RGBX32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
247 { .format
= V4L2_PIX_FMT_HSV32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
248 { .format
= V4L2_PIX_FMT_ARGB32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
249 { .format
= V4L2_PIX_FMT_RGBA32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
250 { .format
= V4L2_PIX_FMT_ABGR32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
251 { .format
= V4L2_PIX_FMT_BGRA32
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
252 { .format
= V4L2_PIX_FMT_RGB565
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
253 { .format
= V4L2_PIX_FMT_RGB555
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
254 { .format
= V4L2_PIX_FMT_BGR666
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
255 { .format
= V4L2_PIX_FMT_BGR48_12
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 6, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
256 { .format
= V4L2_PIX_FMT_BGR48
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 6, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
257 { .format
= V4L2_PIX_FMT_RGB48
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 6, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
258 { .format
= V4L2_PIX_FMT_ABGR64_12
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 8, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
259 { .format
= V4L2_PIX_FMT_RGBA1010102
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
260 { .format
= V4L2_PIX_FMT_RGBX1010102
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
261 { .format
= V4L2_PIX_FMT_ARGB2101010
, .pixel_enc
= V4L2_PIXEL_ENC_RGB
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
263 /* YUV packed formats */
264 { .format
= V4L2_PIX_FMT_YUYV
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
265 { .format
= V4L2_PIX_FMT_YVYU
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
266 { .format
= V4L2_PIX_FMT_UYVY
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
267 { .format
= V4L2_PIX_FMT_VYUY
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
268 { .format
= V4L2_PIX_FMT_Y210
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
269 { .format
= V4L2_PIX_FMT_Y212
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
270 { .format
= V4L2_PIX_FMT_Y216
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 4, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
271 { .format
= V4L2_PIX_FMT_YUV48_12
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 6, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
272 { .format
= V4L2_PIX_FMT_MT2110T
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 5, 10, 0, 0 }, .bpp_div
= { 4, 4, 1, 1 }, .hdiv
= 2, .vdiv
= 2,
273 .block_w
= { 16, 8, 0, 0 }, .block_h
= { 32, 16, 0, 0 }},
274 { .format
= V4L2_PIX_FMT_MT2110R
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 5, 10, 0, 0 }, .bpp_div
= { 4, 4, 1, 1 }, .hdiv
= 2, .vdiv
= 2,
275 .block_w
= { 16, 8, 0, 0 }, .block_h
= { 32, 16, 0, 0 }},
277 /* YUV planar formats */
278 { .format
= V4L2_PIX_FMT_NV12
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
279 { .format
= V4L2_PIX_FMT_NV21
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
280 { .format
= V4L2_PIX_FMT_NV16
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
281 { .format
= V4L2_PIX_FMT_NV61
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
282 { .format
= V4L2_PIX_FMT_NV24
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
283 { .format
= V4L2_PIX_FMT_NV42
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
284 { .format
= V4L2_PIX_FMT_P010
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 2, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
285 { .format
= V4L2_PIX_FMT_P012
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 2, 4, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
287 { .format
= V4L2_PIX_FMT_YUV410
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 4, .vdiv
= 4 },
288 { .format
= V4L2_PIX_FMT_YVU410
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 4, .vdiv
= 4 },
289 { .format
= V4L2_PIX_FMT_YUV411P
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 4, .vdiv
= 1 },
290 { .format
= V4L2_PIX_FMT_YUV420
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
291 { .format
= V4L2_PIX_FMT_YVU420
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
292 { .format
= V4L2_PIX_FMT_YUV422P
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
293 { .format
= V4L2_PIX_FMT_GREY
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
295 /* Tiled YUV formats */
296 { .format
= V4L2_PIX_FMT_NV12_4L4
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
297 { .format
= V4L2_PIX_FMT_NV15_4L4
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 5, 10, 0, 0 }, .bpp_div
= { 4, 4, 1, 1 }, .hdiv
= 2, .vdiv
= 2,
298 .block_w
= { 4, 2, 0, 0 }, .block_h
= { 1, 1, 0, 0 }},
299 { .format
= V4L2_PIX_FMT_P010_4L4
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 1, .comp_planes
= 2, .bpp
= { 2, 4, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
301 /* YUV planar formats, non contiguous variant */
302 { .format
= V4L2_PIX_FMT_YUV420M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
303 { .format
= V4L2_PIX_FMT_YVU420M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
304 { .format
= V4L2_PIX_FMT_YUV422M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
305 { .format
= V4L2_PIX_FMT_YVU422M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
306 { .format
= V4L2_PIX_FMT_YUV444M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
307 { .format
= V4L2_PIX_FMT_YVU444M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 3, .comp_planes
= 3, .bpp
= { 1, 1, 1, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
309 { .format
= V4L2_PIX_FMT_NV12M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
310 { .format
= V4L2_PIX_FMT_NV21M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
311 { .format
= V4L2_PIX_FMT_NV16M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
312 { .format
= V4L2_PIX_FMT_NV61M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 1, 2, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 1 },
313 { .format
= V4L2_PIX_FMT_P012M
, .pixel_enc
= V4L2_PIXEL_ENC_YUV
, .mem_planes
= 2, .comp_planes
= 2, .bpp
= { 2, 4, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 2, .vdiv
= 2 },
315 /* Bayer RGB formats */
316 { .format
= V4L2_PIX_FMT_SBGGR8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
317 { .format
= V4L2_PIX_FMT_SGBRG8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
318 { .format
= V4L2_PIX_FMT_SGRBG8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
319 { .format
= V4L2_PIX_FMT_SRGGB8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
320 { .format
= V4L2_PIX_FMT_SBGGR10
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
321 { .format
= V4L2_PIX_FMT_SGBRG10
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
322 { .format
= V4L2_PIX_FMT_SGRBG10
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
323 { .format
= V4L2_PIX_FMT_SRGGB10
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
324 { .format
= V4L2_PIX_FMT_SBGGR10ALAW8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
325 { .format
= V4L2_PIX_FMT_SGBRG10ALAW8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
326 { .format
= V4L2_PIX_FMT_SGRBG10ALAW8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
327 { .format
= V4L2_PIX_FMT_SRGGB10ALAW8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
328 { .format
= V4L2_PIX_FMT_SBGGR10DPCM8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
329 { .format
= V4L2_PIX_FMT_SGBRG10DPCM8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
330 { .format
= V4L2_PIX_FMT_SGRBG10DPCM8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
331 { .format
= V4L2_PIX_FMT_SRGGB10DPCM8
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 1, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
332 { .format
= V4L2_PIX_FMT_SBGGR12
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
333 { .format
= V4L2_PIX_FMT_SGBRG12
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
334 { .format
= V4L2_PIX_FMT_SGRBG12
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
335 { .format
= V4L2_PIX_FMT_SRGGB12
, .pixel_enc
= V4L2_PIXEL_ENC_BAYER
, .mem_planes
= 1, .comp_planes
= 1, .bpp
= { 2, 0, 0, 0 }, .bpp_div
= { 1, 1, 1, 1 }, .hdiv
= 1, .vdiv
= 1 },
339 for (i
= 0; i
< ARRAY_SIZE(formats
); ++i
)
340 if (formats
[i
].format
== format
)
344 EXPORT_SYMBOL(v4l2_format_info
);
346 static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info
*info
, int plane
)
348 if (!info
->block_w
[plane
])
350 return info
->block_w
[plane
];
353 static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info
*info
, int plane
)
355 if (!info
->block_h
[plane
])
357 return info
->block_h
[plane
];
360 void v4l2_apply_frmsize_constraints(u32
*width
, u32
*height
,
361 const struct v4l2_frmsize_stepwise
*frmsize
)
367 * Clamp width/height to meet min/max constraints and round it up to
368 * macroblock alignment.
370 *width
= clamp_roundup(*width
, frmsize
->min_width
, frmsize
->max_width
,
371 frmsize
->step_width
);
372 *height
= clamp_roundup(*height
, frmsize
->min_height
, frmsize
->max_height
,
373 frmsize
->step_height
);
375 EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints
);
377 int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane
*pixfmt
,
378 u32 pixelformat
, u32 width
, u32 height
)
380 const struct v4l2_format_info
*info
;
381 struct v4l2_plane_pix_format
*plane
;
384 info
= v4l2_format_info(pixelformat
);
388 pixfmt
->width
= width
;
389 pixfmt
->height
= height
;
390 pixfmt
->pixelformat
= pixelformat
;
391 pixfmt
->num_planes
= info
->mem_planes
;
393 if (info
->mem_planes
== 1) {
394 plane
= &pixfmt
->plane_fmt
[0];
395 plane
->bytesperline
= ALIGN(width
, v4l2_format_block_width(info
, 0)) * info
->bpp
[0] / info
->bpp_div
[0];
396 plane
->sizeimage
= 0;
398 for (i
= 0; i
< info
->comp_planes
; i
++) {
399 unsigned int hdiv
= (i
== 0) ? 1 : info
->hdiv
;
400 unsigned int vdiv
= (i
== 0) ? 1 : info
->vdiv
;
401 unsigned int aligned_width
;
402 unsigned int aligned_height
;
404 aligned_width
= ALIGN(width
, v4l2_format_block_width(info
, i
));
405 aligned_height
= ALIGN(height
, v4l2_format_block_height(info
, i
));
407 plane
->sizeimage
+= info
->bpp
[i
] *
408 DIV_ROUND_UP(aligned_width
, hdiv
) *
409 DIV_ROUND_UP(aligned_height
, vdiv
) / info
->bpp_div
[i
];
412 for (i
= 0; i
< info
->comp_planes
; i
++) {
413 unsigned int hdiv
= (i
== 0) ? 1 : info
->hdiv
;
414 unsigned int vdiv
= (i
== 0) ? 1 : info
->vdiv
;
415 unsigned int aligned_width
;
416 unsigned int aligned_height
;
418 aligned_width
= ALIGN(width
, v4l2_format_block_width(info
, i
));
419 aligned_height
= ALIGN(height
, v4l2_format_block_height(info
, i
));
421 plane
= &pixfmt
->plane_fmt
[i
];
422 plane
->bytesperline
=
423 info
->bpp
[i
] * DIV_ROUND_UP(aligned_width
, hdiv
) / info
->bpp_div
[i
];
425 plane
->bytesperline
* DIV_ROUND_UP(aligned_height
, vdiv
);
430 EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp
);
432 int v4l2_fill_pixfmt(struct v4l2_pix_format
*pixfmt
, u32 pixelformat
,
433 u32 width
, u32 height
)
435 const struct v4l2_format_info
*info
;
438 info
= v4l2_format_info(pixelformat
);
442 /* Single planar API cannot be used for multi plane formats. */
443 if (info
->mem_planes
> 1)
446 pixfmt
->width
= width
;
447 pixfmt
->height
= height
;
448 pixfmt
->pixelformat
= pixelformat
;
449 pixfmt
->bytesperline
= ALIGN(width
, v4l2_format_block_width(info
, 0)) * info
->bpp
[0] / info
->bpp_div
[0];
450 pixfmt
->sizeimage
= 0;
452 for (i
= 0; i
< info
->comp_planes
; i
++) {
453 unsigned int hdiv
= (i
== 0) ? 1 : info
->hdiv
;
454 unsigned int vdiv
= (i
== 0) ? 1 : info
->vdiv
;
455 unsigned int aligned_width
;
456 unsigned int aligned_height
;
458 aligned_width
= ALIGN(width
, v4l2_format_block_width(info
, i
));
459 aligned_height
= ALIGN(height
, v4l2_format_block_height(info
, i
));
461 pixfmt
->sizeimage
+= info
->bpp
[i
] *
462 DIV_ROUND_UP(aligned_width
, hdiv
) *
463 DIV_ROUND_UP(aligned_height
, vdiv
) / info
->bpp_div
[i
];
467 EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt
);
469 s64
v4l2_get_link_freq(struct v4l2_ctrl_handler
*handler
, unsigned int mul
,
472 struct v4l2_ctrl
*ctrl
;
475 ctrl
= v4l2_ctrl_find(handler
, V4L2_CID_LINK_FREQ
);
477 struct v4l2_querymenu qm
= { .id
= V4L2_CID_LINK_FREQ
};
480 qm
.index
= v4l2_ctrl_g_ctrl(ctrl
);
482 ret
= v4l2_querymenu(handler
, &qm
);
491 ctrl
= v4l2_ctrl_find(handler
, V4L2_CID_PIXEL_RATE
);
495 freq
= div_u64(v4l2_ctrl_g_ctrl_int64(ctrl
) * mul
, div
);
497 pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
499 pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
503 return freq
> 0 ? freq
: -EINVAL
;
505 EXPORT_SYMBOL_GPL(v4l2_get_link_freq
);
508 * Simplify a fraction using a simple continued fraction decomposition. The
509 * idea here is to convert fractions such as 333333/10000000 to 1/30 using
510 * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
511 * arbitrary parameters to remove non-significative terms from the simple
512 * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
513 * respectively seems to give nice results.
515 void v4l2_simplify_fraction(u32
*numerator
, u32
*denominator
,
516 unsigned int n_terms
, unsigned int threshold
)
522 an
= kmalloc_array(n_terms
, sizeof(*an
), GFP_KERNEL
);
527 * Convert the fraction to a simple continued fraction. See
528 * https://en.wikipedia.org/wiki/Continued_fraction
529 * Stop if the current term is bigger than or equal to the given
535 for (n
= 0; n
< n_terms
&& y
!= 0; ++n
) {
537 if (an
[n
] >= threshold
) {
548 /* Expand the simple continued fraction back to an integer fraction. */
552 for (i
= n
; i
> 0; --i
) {
562 EXPORT_SYMBOL_GPL(v4l2_simplify_fraction
);
565 * Convert a fraction to a frame interval in 100ns multiples. The idea here is
566 * to compute numerator / denominator * 10000000 using 32 bit fixed point
569 u32
v4l2_fraction_to_interval(u32 numerator
, u32 denominator
)
573 /* Saturate the result if the operation would overflow. */
574 if (denominator
== 0 ||
575 numerator
/denominator
>= ((u32
)-1)/10000000)
579 * Divide both the denominator and the multiplier by two until
580 * numerator * multiplier doesn't overflow. If anyone knows a better
581 * algorithm please let me know.
583 multiplier
= 10000000;
584 while (numerator
> ((u32
)-1)/multiplier
) {
589 return denominator
? numerator
* multiplier
/ denominator
: 0;
591 EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval
);
593 int v4l2_link_freq_to_bitmap(struct device
*dev
, const u64
*fw_link_freqs
,
594 unsigned int num_of_fw_link_freqs
,
595 const s64
*driver_link_freqs
,
596 unsigned int num_of_driver_link_freqs
,
597 unsigned long *bitmap
)
603 if (!num_of_fw_link_freqs
) {
604 dev_err(dev
, "no link frequencies in firmware\n");
608 for (i
= 0; i
< num_of_fw_link_freqs
; i
++) {
611 for (j
= 0; j
< num_of_driver_link_freqs
; j
++) {
612 if (fw_link_freqs
[i
] != driver_link_freqs
[j
])
615 dev_dbg(dev
, "enabling link frequency %lld Hz\n",
616 driver_link_freqs
[j
]);
623 dev_err(dev
, "no matching link frequencies found\n");
625 dev_dbg(dev
, "specified in firmware:\n");
626 for (i
= 0; i
< num_of_fw_link_freqs
; i
++)
627 dev_dbg(dev
, "\t%llu Hz\n", fw_link_freqs
[i
]);
629 dev_dbg(dev
, "driver supported:\n");
630 for (i
= 0; i
< num_of_driver_link_freqs
; i
++)
631 dev_dbg(dev
, "\t%lld Hz\n", driver_link_freqs
[i
]);
638 EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap
);