/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "libavutil/avutil.h"
#include "libavutil/bswap.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "config.h"
#include "rgb2rgb.h"
#include "swscale.h"
#include "swscale_internal.h"

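/* RGB -> YUV conversion coefficients (ITU-R BT.601), stored as rounded
 * Q15 fixed-point values: luma taps are scaled by 219/255 and chroma taps
 * by 224/255 for limited-range output. */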
#define RGB2YUV_SHIFT 15
#define BY ((int)(0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int)(0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ((int)(0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int)(0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int)(0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ((int)(0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ((int)(0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int)(0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE) ? r_b : b_r)

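/* Readers for packed 48-bit RGB/BGR (16 bits per component). The r and b
 * macros above swap the red and blue taps for the BGR48 formats so the same
 * template body handles both channel orders; the *_half variant averages two
 * horizontally adjacent pixels for chroma-subsampled output. */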
static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);

        dst[i] = (RY * r + GY * g + BY * b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i * 3 + 0]);
        int g   = input_pixel(&src1[i * 3 + 1]);
        int b_r = input_pixel(&src1[i * 3 + 2]);

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[6 * i + 0]) +
                   input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        int g   = (input_pixel(&src1[6 * i + 1]) +
                   input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        int b_r = (input_pixel(&src1[6 * i + 2]) +
                   input_pixel(&src1[6 * i + 5]) + 1) >> 1;

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

#undef r
#undef b
#undef input_pixel

#define rgb48funcs(pattern, BE_LE, origin)                              \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst,              \
                                            const uint8_t *_src,       \
                                            int width,                 \
                                            uint32_t *unused)          \
{                                                                       \
    const uint16_t *src = (const uint16_t *)_src;                       \
    uint16_t *dst       = (uint16_t *)_dst;                             \
    rgb48ToY_c_template(dst, src, width, origin);                       \
}                                                                       \
                                                                        \
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU,            \
                                             uint8_t *_dstV,           \
                                             const uint8_t *_src1,     \
                                             const uint8_t *_src2,     \
                                             int width,                \
                                             uint32_t *unused)         \
{                                                                       \
    const uint16_t *src1 = (const uint16_t *)_src1,                     \
                   *src2 = (const uint16_t *)_src2;                     \
    uint16_t *dstU = (uint16_t *)_dstU,                                 \
             *dstV = (uint16_t *)_dstV;                                 \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin);        \
}                                                                       \
                                                                        \
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU,       \
                                                  uint8_t *_dstV,      \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width,           \
                                                  uint32_t *unused)    \
{                                                                       \
    const uint16_t *src1 = (const uint16_t *)_src1,                     \
                   *src2 = (const uint16_t *)_src2;                     \
    uint16_t *dstU = (uint16_t *)_dstU,                                 \
             *dstV = (uint16_t *)_dstV;                                 \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin);   \
}

rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)

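/* Generic readers for packed 16- and 32-bit RGB formats. input_pixel() loads
 * a native 32-bit word for the 4-byte-per-pixel formats and an
 * endianness-corrected 16-bit word otherwise; the shift/mask parameters of
 * the templates below then isolate the r, g and b fields of each format. */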
#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA ||                  \
                         origin == AV_PIX_FMT_BGRA ||                  \
                         origin == AV_PIX_FMT_ARGB ||                  \
                         origin == AV_PIX_FMT_ABGR)                    \
                        ? AV_RN32A(&src[(i) * 4])                      \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2])       \
                                        : AV_RL16(&src[(i) * 2])))

static av_always_inline void rgb16_32ToY_c_template(uint8_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S)
{
    const int ry       = RY << rsh, gy = GY << gsh, by = BY << bsh;
    const unsigned rnd = 33u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dst[i] = (ry * r + gy * g + by * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_c_template(uint8_t *dstU,
                                                     uint8_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S)
{
    const int ru       = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv       = RV << rsh, gv = GV << gsh, bv = BV << bsh;
    const unsigned rnd = 257u << (S - 1);
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> S;
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> S;
    }
}

static av_always_inline void rgb16_32ToUV_half_c_template(uint8_t *dstU,
                                                          uint8_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S)
{
    const int ru       = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv       = RV << rsh, gv = GV << gsh, bv = BV << bsh,
              maskgx   = ~(maskr | maskb);
    const unsigned rnd = 257u << S;
    int i;

    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        int px0 = input_pixel(2 * i + 0) >> shp;
        int px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;

        b = (rb & maskb) >> shb;
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> (S + 1);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> (S + 1);
    }
}

#undef input_pixel

#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr,          \
                         maskg, maskb, rsh, gsh, bsh, S)                \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src,             \
                          int width, uint32_t *unused)                  \
{                                                                       \
    rgb16_32ToY_c_template(dst, src, width, fmt, shr, shg, shb, shp,    \
                           maskr, maskg, maskb, rsh, gsh, bsh, S);      \
}                                                                       \
                                                                        \
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV,                \
                           const uint8_t *src, const uint8_t *dummy,    \
                           int width, uint32_t *unused)                 \
{                                                                       \
    rgb16_32ToUV_c_template(dstU, dstV, src, width, fmt,                \
                            shr, shg, shb, shp,                         \
                            maskr, maskg, maskb, rsh, gsh, bsh, S);     \
}                                                                       \
                                                                        \
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV,           \
                                const uint8_t *src,                     \
                                const uint8_t *dummy,                   \
                                int width, uint32_t *unused)            \
{                                                                       \
    rgb16_32ToUV_half_c_template(dstU, dstV, src, width, fmt,           \
                                 shr, shg, shb, shp,                    \
                                 maskr, maskg, maskb,                   \
                                 rsh, gsh, bsh, S);                     \
}

rgb16_32_wrapper(AV_PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)

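/* Alpha extraction from packed 32-bit formats: ARGB/ABGR carry alpha in the
 * first byte of each pixel, RGBA/BGRA in the last. */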
static void abgrToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i];
}

static void rgbaToA_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i + 3];
}

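/* Palettized input: each entry of *pal is read as packed YUV, with Y in
 * bits 0-7, U in bits 8-15 and V in bits 16-23. */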
static void palToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *pal)
{
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = pal[d] & 0xFF;
    }
}

static void palToUV_c(uint8_t *dstU, uint8_t *dstV,
                      const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = p >> 8;
        dstV[i] = p >> 16;
    }
}

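/* 1 bpp monochrome: each source byte holds 8 pixels, MSB first, expanded to
 * 0/255 luma. MONOWHITE uses the inverted sense (set bit = black), hence the
 * ~src[i]. */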
static void monowhite2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = ~src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}

static void monoblack2Y_c(uint8_t *dst, const uint8_t *src,
                          int width, uint32_t *unused)
{
    int i, j;
    width = (width + 7) >> 3;
    for (i = 0; i < width; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 255;
    }
}

static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}

static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstV[i] = src1[4 * i + 1];
        dstU[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, int width,
                       uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *)_src;
    uint16_t *dst       = (uint16_t *)_dst;
    for (i = 0; i < width; i++)
        dst[i] = av_bswap16(src[i]);
}

static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *)_src1,
                   *src2 = (const uint16_t *)_src2;
    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}

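/* YA16 (16-bit gray + 16-bit alpha) readers: copy either the gray or the
 * alpha component of each pixel into a packed 16-bit line in native byte
 * order. */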
static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
}

static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
}

static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, int width,
                               uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
}

static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, int width,
                                uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
}

/* This is almost identical to the previous one, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}

static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    assert(src1 == src2);
}

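/* Semi-planar chroma (NV12/NV21): the interleaved chroma plane is split into
 * separate U and V lines; NV21 just swaps the destination order. */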
static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
                                        const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2 * i + 0];
        dst2[i] = src[2 * i + 1];
    }
}

static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}

static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}

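/* P010 keeps 10-bit samples in the upper bits of its 16-bit words; shifting
 * right by 6 leaves the plain 10-bit value in a native-endian 16-bit word. */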
static void p010LEToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> 6);
    }
}

static void p010BEToY_c(uint8_t *dst, const uint8_t *src, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> 6);
    }
}

static void p010LEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> 6);
    }
}

static void p010BEToUV_c(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *src1, const uint8_t *src2,
                         int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> 6);
        AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> 6);
    }
}

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

static void bgr24ToY_c(uint8_t *dst, const uint8_t *src,
                       int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void bgr24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
    assert(src1 == src2);
}

static void bgr24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
    assert(src1 == src2);
}

static void rgb24ToY_c(uint8_t *dst, const uint8_t *src, int width,
                       uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void rgb24ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static void rgb24ToUV_half_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
    }
}

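/* Planar GBR(A) readers: the planes arrive in G, B, R (, A) order, hence the
 * indexing of src[] below. */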
static void planar_rgb_to_y(uint8_t *dst, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    }
}

static void planar_rgb_to_a(uint8_t *dst, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[3][i];
}

static void planar_rgb_to_uv(uint8_t *dstU, uint8_t *dstV, const uint8_t *src[4], int width)
{
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

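/* 9- to 16-bit planar GBR(A) readers: rdpx() does an endianness-aware 16-bit
 * load, and the (bpc - 9) term scales the rounding constant to the source
 * bit depth. */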
#define rdpx(src) \
    is_be ? AV_RB16(src) : AV_RL16(src)
static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst        = (uint16_t *)_dst;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT);
    }
}

static av_always_inline void planar_rgb16_to_a(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst        = (uint16_t *)_dst;
    int shift = bpc < 15 ? bpc : 14;

    for (i = 0; i < width; i++) {
        dst[i] = rdpx(src[3] + i) << (14 - shift);
    }
}

static void planar_rgb9le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 0);
}

static void planar_rgb9be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 9, 1);
}

static void planar_rgb10le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 0);
}

static void planar_rgb10le_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 10, 0);
}

static void planar_rgb10be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 10, 1);
}

static void planar_rgb10be_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 10, 1);
}

static void planar_rgb12le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 12, 0);
}

static void planar_rgb12le_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 12, 0);
}

static void planar_rgb12be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 12, 1);
}

static void planar_rgb12be_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 12, 1);
}

static void planar_rgb16le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 0);
}

static void planar_rgb16le_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 16, 0);
}

static void planar_rgb16be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_y(dst, src, w, 16, 1);
}

static void planar_rgb16be_to_a(uint8_t *dst, const uint8_t *src[4], int w)
{
    planar_rgb16_to_a(dst, src, w, 16, 1);
}

static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
                                                const uint8_t *_src[4], int width,
                                                int bpc, int is_be)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dstU       = (uint16_t *)_dstU;
    uint16_t *dstV       = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
    }
}
#undef rdpx

static void planar_rgb9le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 0);
}

static void planar_rgb9be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 1);
}

static void planar_rgb10le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 0);
}

static void planar_rgb10be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 1);
}

static void planar_rgb12le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 12, 0);
}

static void planar_rgb12be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 12, 1);
}

static void planar_rgb16le_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 0);
}

static void planar_rgb16be_to_uv(uint8_t *dstU, uint8_t *dstV,
                                 const uint8_t *src[4], int w)
{
    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 1);
}

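/* Pick the per-format input readers. lumToYV12/chrToYV12/alpToYV12 convert one
 * line of packed input to planar luma/chroma/alpha; readLumPlanar,
 * readChrPlanar and readAlpPlanar handle planar RGB sources; the *UV_half_c
 * variants, which average horizontally adjacent pixels, are chosen when
 * chrSrcHSubSample is set. */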
av_cold void ff_sws_init_input_funcs(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    c->chrToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_YUYV422:
        c->chrToYV12 = yuy2ToUV_c;
        break;
    case AV_PIX_FMT_YVYU422:
        c->chrToYV12 = yvy2ToUV_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->chrToYV12 = uyvyToUV_c;
        break;
    case AV_PIX_FMT_NV12:
        c->chrToYV12 = nv12ToUV_c;
        break;
    case AV_PIX_FMT_NV21:
        c->chrToYV12 = nv21ToUV_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->chrToYV12 = palToUV_c;
        break;
    case AV_PIX_FMT_GBRP9LE:
        c->readChrPlanar = planar_rgb9le_to_uv;
        break;
    case AV_PIX_FMT_GBRAP10LE:
    case AV_PIX_FMT_GBRP10LE:
        c->readChrPlanar = planar_rgb10le_to_uv;
        break;
    case AV_PIX_FMT_GBRAP12LE:
    case AV_PIX_FMT_GBRP12LE:
        c->readChrPlanar = planar_rgb12le_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16LE:
    case AV_PIX_FMT_GBRP16LE:
        c->readChrPlanar = planar_rgb16le_to_uv;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readChrPlanar = planar_rgb9be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP10BE:
    case AV_PIX_FMT_GBRP10BE:
        c->readChrPlanar = planar_rgb10be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP12BE:
    case AV_PIX_FMT_GBRP12BE:
        c->readChrPlanar = planar_rgb12be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16BE:
    case AV_PIX_FMT_GBRP16BE:
        c->readChrPlanar = planar_rgb16be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        c->readChrPlanar = planar_rgb_to_uv;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->chrToYV12 = bswap16UV_c;
        break;
#else
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->chrToYV12 = bswap16UV_c;
        break;
#endif
    case AV_PIX_FMT_P010LE:
        c->chrToYV12 = p010LEToUV_c;
        break;
    case AV_PIX_FMT_P010BE:
        c->chrToYV12 = p010BEToUV_c;
        break;
    }
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_half_c;
            break;
        }
    } else {
        switch (srcFormat) {
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_c;
            break;
        }
    }

    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_GBRP9LE:
        c->readLumPlanar = planar_rgb9le_to_y;
        break;
    case AV_PIX_FMT_GBRAP10LE:
        c->readAlpPlanar = planar_rgb10le_to_a;
    case AV_PIX_FMT_GBRP10LE:
        c->readLumPlanar = planar_rgb10le_to_y;
        break;
    case AV_PIX_FMT_GBRAP12LE:
        c->readAlpPlanar = planar_rgb12le_to_a;
    case AV_PIX_FMT_GBRP12LE:
        c->readLumPlanar = planar_rgb12le_to_y;
        break;
    case AV_PIX_FMT_GBRAP16LE:
        c->readAlpPlanar = planar_rgb16le_to_a;
    case AV_PIX_FMT_GBRP16LE:
        c->readLumPlanar = planar_rgb16le_to_y;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readLumPlanar = planar_rgb9be_to_y;
        break;
    case AV_PIX_FMT_GBRAP10BE:
        c->readAlpPlanar = planar_rgb10be_to_a;
    case AV_PIX_FMT_GBRP10BE:
        c->readLumPlanar = planar_rgb10be_to_y;
        break;
    case AV_PIX_FMT_GBRAP12BE:
        c->readAlpPlanar = planar_rgb12be_to_a;
    case AV_PIX_FMT_GBRP12BE:
        c->readLumPlanar = planar_rgb12be_to_y;
        break;
    case AV_PIX_FMT_GBRAP16BE:
        c->readAlpPlanar = planar_rgb16be_to_a;
    case AV_PIX_FMT_GBRP16BE:
        c->readLumPlanar = planar_rgb16be_to_y;
        break;
    case AV_PIX_FMT_GBRAP:
        c->readAlpPlanar = planar_rgb_to_a;
    case AV_PIX_FMT_GBRP:
        c->readLumPlanar = planar_rgb_to_y;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_GRAY10LE:
    case AV_PIX_FMT_GRAY12LE:
    case AV_PIX_FMT_GRAY16LE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#else
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_GRAY10BE:
    case AV_PIX_FMT_GRAY12BE:
    case AV_PIX_FMT_GRAY16BE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#endif
    case AV_PIX_FMT_YA16LE:
        c->lumToYV12 = read_ya16le_gray_c;
        c->alpToYV12 = read_ya16le_alpha_c;
        break;
    case AV_PIX_FMT_YA16BE:
        c->lumToYV12 = read_ya16be_gray_c;
        c->alpToYV12 = read_ya16be_alpha_c;
        break;
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_YVYU422:
    case AV_PIX_FMT_YA8:
        c->lumToYV12 = yuy2ToY_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->lumToYV12 = uyvyToY_c;
        break;
    case AV_PIX_FMT_BGR24:
        c->lumToYV12 = bgr24ToY_c;
        break;
    case AV_PIX_FMT_BGR565LE:
        c->lumToYV12 = bgr16leToY_c;
        break;
    case AV_PIX_FMT_BGR565BE:
        c->lumToYV12 = bgr16beToY_c;
        break;
    case AV_PIX_FMT_BGR555LE:
        c->lumToYV12 = bgr15leToY_c;
        break;
    case AV_PIX_FMT_BGR555BE:
        c->lumToYV12 = bgr15beToY_c;
        break;
    case AV_PIX_FMT_BGR444LE:
        c->lumToYV12 = bgr12leToY_c;
        break;
    case AV_PIX_FMT_BGR444BE:
        c->lumToYV12 = bgr12beToY_c;
        break;
    case AV_PIX_FMT_RGB24:
        c->lumToYV12 = rgb24ToY_c;
        break;
    case AV_PIX_FMT_RGB565LE:
        c->lumToYV12 = rgb16leToY_c;
        break;
    case AV_PIX_FMT_RGB565BE:
        c->lumToYV12 = rgb16beToY_c;
        break;
    case AV_PIX_FMT_RGB555LE:
        c->lumToYV12 = rgb15leToY_c;
        break;
    case AV_PIX_FMT_RGB555BE:
        c->lumToYV12 = rgb15beToY_c;
        break;
    case AV_PIX_FMT_RGB444LE:
        c->lumToYV12 = rgb12leToY_c;
        break;
    case AV_PIX_FMT_RGB444BE:
        c->lumToYV12 = rgb12beToY_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->lumToYV12 = palToY_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        c->lumToYV12 = monoblack2Y_c;
        break;
    case AV_PIX_FMT_MONOWHITE:
        c->lumToYV12 = monowhite2Y_c;
        break;
    case AV_PIX_FMT_RGB32:
        c->lumToYV12 = bgr32ToY_c;
        break;
    case AV_PIX_FMT_RGB32_1:
        c->lumToYV12 = bgr321ToY_c;
        break;
    case AV_PIX_FMT_BGR32:
        c->lumToYV12 = rgb32ToY_c;
        break;
    case AV_PIX_FMT_BGR32_1:
        c->lumToYV12 = rgb321ToY_c;
        break;
    case AV_PIX_FMT_RGB48BE:
        c->lumToYV12 = rgb48BEToY_c;
        break;
    case AV_PIX_FMT_RGB48LE:
        c->lumToYV12 = rgb48LEToY_c;
        break;
    case AV_PIX_FMT_BGR48BE:
        c->lumToYV12 = bgr48BEToY_c;
        break;
    case AV_PIX_FMT_BGR48LE:
        c->lumToYV12 = bgr48LEToY_c;
        break;
    case AV_PIX_FMT_P010LE:
        c->lumToYV12 = p010LEToY_c;
        break;
    case AV_PIX_FMT_P010BE:
        c->lumToYV12 = p010BEToY_c;
        break;
    }
    if (c->alpPixBuf) {
        switch (srcFormat) {
        case AV_PIX_FMT_BGRA:
        case AV_PIX_FMT_RGBA:
            c->alpToYV12 = rgbaToA_c;
            break;
        case AV_PIX_FMT_ABGR:
        case AV_PIX_FMT_ARGB:
            c->alpToYV12 = abgrToA_c;
            break;
        case AV_PIX_FMT_YA8:
            c->alpToYV12 = uyvyToY_c;
            break;