// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 *	This file replaces the videodev.c file that comes with the
 *	regular kernel distribution.
 *
 * Author:	Bill Dirks <bill@thedirks.org>
 *		based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 *  Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 *  Dan Merillat <dan@merillat.org>
 * Added Gerd Knorr's v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
        const char *name;
        s64 min = _min;
        s64 max = _max;
        u64 step = _step;
        s64 def = _def;

        v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
                       &min, &max, &step, &def, &qctrl->flags);

        if (name == NULL)
                return -EINVAL;

        qctrl->minimum = min;
        qctrl->maximum = max;
        qctrl->step = step;
        qctrl->default_value = def;
        qctrl->reserved[0] = qctrl->reserved[1] = 0;
        strscpy(qctrl->name, name, sizeof(qctrl->name));
        return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
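
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * answering VIDIOC_QUERYCTRL for a standard control only passes the range;
 * the name, type and flags are derived from the control ID.  The 0..255
 * range and the default of 128 below are made-up example values:
 *
 *      case V4L2_CID_BRIGHTNESS:
 *              return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 128);
 */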

/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);

        /* Clamp to aligned min and max */
        x = clamp(x, (min + ~mask) & mask, max & mask);

        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        return x;
}
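
/*
 * Worked example (illustrative, not part of the original file): with
 * min = 17, max = 41 and align = 4 the only multiple of 16 in range is 32,
 * so the aligned range becomes [32, 32] and clamp_align(20, 17, 41, 4)
 * returns 32.  With align = 0 the mask is ~0 and the call reduces to a
 * plain clamp(x, min, max).
 */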

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
                                  unsigned int max, unsigned int alignment)
{
        x = clamp(x, min, max);
        if (alignment)
                x = round_up(x, alignment);

        return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
                           unsigned int walign,
                           u32 *h, unsigned int hmin, unsigned int hmax,
                           unsigned int halign, unsigned int salign)
{
        *w = clamp_align(*w, wmin, wmax, walign);
        *h = clamp_align(*h, hmin, hmax, halign);

        /* Usually we don't need to align the size and are done now. */
        if (!salign)
                return;

        /* How much alignment do we have? */
        walign = __ffs(*w);
        halign = __ffs(*h);
        /* Enough to satisfy the image alignment? */
        if (walign + halign < salign) {
                /* Max walign where there is still a valid width */
                unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
                /* Max halign where there is still a valid height */
                unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

                /* up the smaller alignment until we have enough */
                do {
                        if (halign >= hmaxa ||
                            (walign <= halign && walign < wmaxa)) {
                                *w = clamp_align(*w, wmin, wmax, walign + 1);
                                walign = __ffs(*w);
                        } else {
                                *h = clamp_align(*h, hmin, hmax, halign + 1);
                                halign = __ffs(*h);
                        }
                } while (halign + walign < salign);
        }
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
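
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * bounding a requested capture size for hypothetical hardware that wants
 * the width on a 16-pixel boundary (walign = 4), the height on a 2-line
 * boundary (halign = 1) and the total image size divisible by 256
 * (salign = 8):
 *
 *      u32 width = 1277, height = 719;
 *
 *      v4l_bound_align_image(&width, 48, 1920, 4,
 *                            &height, 32, 1080, 1, 8);
 *
 * Width and height are clamped to [48, 1920] x [32, 1080], rounded to the
 * requested alignments (1280x720 here), and further adjusted until
 * __ffs(width) + __ffs(height) >= salign.
 */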

const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
                         size_t entry_size, size_t width_offset,
                         size_t height_offset, s32 width, s32 height)
{
        u32 error, min_error = U32_MAX;
        const void *best = NULL;
        unsigned int i;

        if (!array)
                return NULL;

        for (i = 0; i < array_size; i++, array += entry_size) {
                const u32 *entry_width = array + width_offset;
                const u32 *entry_height = array + height_offset;

                error = abs(*entry_width - width) + abs(*entry_height - height);
                if (error > min_error)
                        continue;

                min_error = error;
                best = array;
                if (!error)
                        break;
        }

        return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
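
/*
 * Illustrative use (not part of the original file): drivers normally reach
 * this helper through the v4l2_find_nearest_size() macro from
 * <media/v4l2-common.h>, passing their own table of supported sizes (the
 * foo_sizes table below is hypothetical):
 *
 *      static const struct v4l2_frmsize_discrete foo_sizes[] = {
 *              {  640,  480 },
 *              { 1280,  720 },
 *              { 1920, 1080 },
 *      };
 *
 *      const struct v4l2_frmsize_discrete *best =
 *              v4l2_find_nearest_size(foo_sizes, ARRAY_SIZE(foo_sizes),
 *                                     width, height, 1300, 730);
 *
 * best points at the 1280x720 entry, the one with the smallest summed
 * width/height distance from the requested 1300x730.
 */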

int v4l2_g_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = { 0 };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = {
                .interval = a->parm.capture.timeperframe
        };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        memset(&a->parm, 0, sizeof(a->parm));
        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        else
                a->parm.capture.readbuffers = 0;

        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
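
/*
 * Usage sketch (illustrative, not part of the original file): a bridge
 * driver can implement VIDIOC_G_PARM/VIDIOC_S_PARM by forwarding to its
 * sensor subdevice; struct foo_dev and its vdev/sensor members are
 * hypothetical:
 *
 *      static int foo_g_parm(struct file *file, void *fh,
 *                            struct v4l2_streamparm *a)
 *      {
 *              struct foo_dev *foo = video_drvdata(file);
 *
 *              return v4l2_g_parm_cap(&foo->vdev, foo->sensor, a);
 *      }
 *
 * v4l2_s_parm_cap() is wired up the same way for the S_PARM ioctl.
 */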

const struct v4l2_format_info *v4l2_format_info(u32 format)
{
        static const struct v4l2_format_info formats[] = {
                /* RGB formats */
                { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV packed formats */
                { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* YUV planar formats */
                { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV planar formats, non contiguous variant */
                { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* Bayer RGB formats */
                { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); ++i)
                if (formats[i].format == format)
                        return &formats[i];
        return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
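
/*
 * Illustrative lookup (not part of the original file): callers typically
 * use this table to validate a pixel format and derive its memory layout:
 *
 *      const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_NV12);
 *
 * For NV12 this returns an entry with one memory plane, two component
 * planes, bpp = { 1, 2 } and 2x2 chroma subsampling (hdiv = vdiv = 2);
 * an unknown fourcc returns NULL.
 */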

static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_w[plane])
                return 1;
        return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_h[plane])
                return 1;
        return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
                                    const struct v4l2_frmsize_stepwise *frmsize)
{
        if (!frmsize)
                return;

        /*
         * Clamp width/height to meet min/max constraints and round it up to
         * macroblock alignment.
         */
        *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
                               frmsize->step_width);
        *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
                                frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
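
/*
 * Illustrative call (not part of the original file): constraining a
 * requested size for a hypothetical codec that supports 96x96 up to
 * 1920x1088 in steps of 16 pixels/lines:
 *
 *      static const struct v4l2_frmsize_stepwise foo_framesizes = {
 *              .min_width = 96, .max_width = 1920, .step_width = 16,
 *              .min_height = 96, .max_height = 1088, .step_height = 16,
 *      };
 *      u32 width = 1910, height = 1080;
 *
 *      v4l2_apply_frmsize_constraints(&width, &height, &foo_framesizes);
 *
 * width is rounded up to 1920 and height up to 1088.
 */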

int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
                        u32 pixelformat, u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        struct v4l2_plane_pix_format *plane;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->num_planes = info->mem_planes;

        if (info->mem_planes == 1) {
                plane = &pixfmt->plane_fmt[0];
                plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
                plane->sizeimage = 0;

                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane->sizeimage += info->bpp[i] *
                                            DIV_ROUND_UP(aligned_width, hdiv) *
                                            DIV_ROUND_UP(aligned_height, vdiv);
                }
        } else {
                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane = &pixfmt->plane_fmt[i];
                        plane->bytesperline =
                                info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
                        plane->sizeimage =
                                plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
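
/*
 * Worked example (illustrative, not part of the original file): filling a
 * 1920x1080 description for the non-contiguous V4L2_PIX_FMT_NV12M format:
 *
 *      struct v4l2_pix_format_mplane pix_mp;
 *
 *      v4l2_fill_pixfmt_mp(&pix_mp, V4L2_PIX_FMT_NV12M, 1920, 1080);
 *
 * NV12M has two memory planes, so num_planes becomes 2.  Plane 0 (luma,
 * bpp = 1) gets bytesperline = 1920 and sizeimage = 1920 * 1080; plane 1
 * (interleaved CbCr, bpp = 2, hdiv = vdiv = 2) gets bytesperline =
 * 2 * (1920 / 2) = 1920 and sizeimage = 1920 * (1080 / 2).
 */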

int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
                     u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        /* Single planar API cannot be used for multi plane formats. */
        if (info->mem_planes > 1)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
        pixfmt->sizeimage = 0;

        for (i = 0; i < info->comp_planes; i++) {
                unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                unsigned int aligned_width;
                unsigned int aligned_height;

                aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                pixfmt->sizeimage += info->bpp[i] *
                                     DIV_ROUND_UP(aligned_width, hdiv) *
                                     DIV_ROUND_UP(aligned_height, vdiv);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
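
/*
 * Worked example (illustrative, not part of the original file): the single
 * planar variant for a 640x480 V4L2_PIX_FMT_YUYV image:
 *
 *      struct v4l2_pix_format pix;
 *
 *      v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_YUYV, 640, 480);
 *
 * YUYV packs all components into one plane at bpp = 2, so bytesperline
 * becomes 640 * 2 = 1280 and sizeimage 1280 * 480 = 614400.  A format
 * with more than one memory plane (e.g. NV12M) is rejected with -EINVAL.
 */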

s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div)
{
        struct v4l2_ctrl *ctrl;
        s64 freq;

        ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
        if (ctrl) {
                struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
                int ret;

                qm.index = v4l2_ctrl_g_ctrl(ctrl);

                ret = v4l2_querymenu(handler, &qm);
                if (ret)
                        return -ENOENT;

                freq = qm.value;
        } else {
                if (!mul || !div)
                        return -ENOENT;

                ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
                if (!ctrl)
                        return -ENOENT;

                freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
        }

        return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
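
/*
 * Usage sketch (illustrative, not part of the original file): a CSI-2
 * receiver querying the link rate from its sensor's control handler, where
 * sensor is a hypothetical struct v4l2_subdev pointer and the 10 bits per
 * sample and 2 data lanes are made-up example values (div = 2 * lanes to
 * account for double data rate signalling):
 *
 *      s64 link_rate = v4l2_get_link_rate(sensor->ctrl_handler, 10, 2 * 2);
 *
 *      if (link_rate < 0)
 *              return link_rate;
 *
 * If the sensor exposes V4L2_CID_LINK_FREQ, that menu value is returned
 * and mul/div are ignored; otherwise V4L2_CID_PIXEL_RATE is scaled by
 * mul/div.
 */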