/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
#include "libavutil/bswap.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"

#include "config.h"
#include "swscale.h"
#include "swscale_internal.h"

DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8] = {
{  1,   3,   1,   3,   1,   3,   1,   3, },
{  2,   0,   2,   0,   2,   0,   2,   0, },
};

DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8] = {
{  6,   2,   6,   2,   6,   2,   6,   2, },
{  0,   4,   0,   4,   0,   4,   0,   4, },
};

DECLARE_ALIGNED(8, const uint8_t, ff_dither_4x4_16)[4][8] = {
{  8,   4,  11,   7,   8,   4,  11,   7, },
{  2,  14,   1,  13,   2,  14,   1,  13, },
{ 10,   6,   9,   5,  10,   6,   9,   5, },
{  0,  12,   3,  15,   0,  12,   3,  15, },
};

DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_32)[8][8] = {
{ 17,   9,  23,  15,  16,   8,  22,  14, },
{  5,  29,   3,  27,   4,  28,   2,  26, },
{ 21,  13,  19,  11,  20,  12,  18,  10, },
{  0,  24,   6,  30,   1,  25,   7,  31, },
{ 16,   8,  22,  14,  17,   9,  23,  15, },
{  4,  28,   2,  26,   5,  29,   3,  27, },
{ 20,  12,  18,  10,  21,  13,  19,  11, },
{  1,  25,   7,  31,   0,  24,   6,  30, },
};

DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_73)[8][8] = {
{  0,  55,  14,  68,   3,  58,  17,  72, },
{ 37,  18,  50,  32,  40,  22,  54,  35, },
{  9,  64,   5,  59,  13,  67,   8,  63, },
{ 46,  27,  41,  23,  49,  31,  44,  26, },
{  2,  57,  16,  71,   1,  56,  15,  70, },
{ 39,  21,  52,  34,  38,  19,  51,  33, },
{ 11,  66,   7,  62,  10,  65,   6,  60, },
{ 48,  30,  43,  25,  47,  29,  42,  24, },
};

#if 1
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
{117,  62, 158, 103, 113,  58, 155, 100, },
{ 34, 199,  21, 186,  31, 196,  17, 182, },
{144,  89, 131,  76, 141,  86, 127,  72, },
{  0, 165,  41, 206,  10, 175,  52, 217, },
{110,  55, 151,  96, 120,  65, 162, 107, },
{ 28, 193,  14, 179,  38, 203,  24, 189, },
{138,  83, 124,  69, 148,  93, 134,  79, },
{  7, 172,  48, 213,   3, 168,  45, 210, },
};
#elif 1
// tries to correct a gamma of 1.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
{  0, 143,  18, 200,   2, 156,  25, 215, },
{ 78,  28, 125,  64,  89,  36, 138,  74, },
{ 10, 180,   3, 161,  16, 195,   8, 175, },
{109,  51,  93,  38, 121,  60, 105,  47, },
{  1, 152,  23, 210,   0, 147,  20, 205, },
{ 85,  33, 134,  71,  81,  30, 130,  67, },
{ 14, 190,   6, 171,  12, 185,   5, 166, },
{117,  57, 101,  44, 113,  54,  97,  41, },
};
#elif 1
// tries to correct a gamma of 2.0
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
{  0, 124,   8, 193,   0, 140,  12, 213, },
{ 55,  14, 104,  42,  66,  19, 119,  52, },
{  3, 168,   1, 145,   6, 187,   3, 162, },
{ 86,  31,  70,  21,  99,  39,  82,  28, },
{  0, 134,  11, 206,   0, 129,   9, 200, },
{ 62,  17, 114,  48,  58,  16, 109,  45, },
{  5, 181,   2, 157,   4, 175,   1, 151, },
{ 95,  36,  78,  26,  90,  34,  74,  24, },
};
#else
// tries to correct a gamma of 2.5
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[8][8] = {
{  0, 107,   3, 187,   0, 125,   6, 212, },
{ 39,   7,  86,  28,  49,  11, 102,  36, },
{  1, 158,   0, 131,   3, 180,   1, 151, },
{ 68,  19,  52,  12,  81,  25,  64,  17, },
{  0, 119,   5, 203,   0, 113,   4, 195, },
{ 45,   9,  96,  33,  42,   8,  91,  30, },
{  2, 172,   1, 144,   2, 165,   0, 137, },
{ 77,  23,  60,  15,  72,  21,  56,  14, },
};
#endif
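
/* These matrices are indexed with the output line (y & (rows - 1)) and the
 * pixel position (x & 7): neighbouring pixels see different thresholds, so
 * the quantization error of the bit-depth reduction is spread spatially
 * instead of banding (ordered dithering). The monochrome output below, for
 * example, adds d128[x & 7] to each luma value before comparing it against
 * a fixed threshold. */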

#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
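
/* Example expansion: output_pixel(&dest[i], val, 0x8000, int) stores
 * 0x8000 + av_clip_int16(val >> shift) with AV_WB16() or AV_WL16(), i.e. the
 * accumulator is clipped as a signed 16-bit value and then biased back into
 * the unsigned output range. */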

// Left-shifting a negative value is undefined in C
#define SHIFT_LEFT(val, shift) ((val) * (1 << (shift)))
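
/* For example, SHIFT_LEFT(-128, 19) evaluates to -128 * (1 << 19) ==
 * -67108864, the value "-128 << 19" is intended to produce, without the
 * undefined behavior of left-shifting a negative operand. */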

static av_always_inline void
yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 19 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], val, 0, uint);
    }
}

static av_always_inline void
yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 15 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (30-output_bits);
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs), the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val, 0x8000, int);
    }
}
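
/* Worked example for output_bits == 16: shift == 15 and val starts at
 * (1 << 14) - 0x40000000; output_pixel() then shifts right by 15 and the
 * 0x8000 bias re-adds exactly the constant subtracted above, since
 * 0x40000000 >> 15 == 0x8000. */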

#undef output_pixel

#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }

static av_always_inline void
yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 15 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
        output_pixel(&dest[i], val);
    }
}

static av_always_inline void
yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 11 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (26-output_bits);
        int j;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}

#undef output_pixel

#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}

yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
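
/* Each yuv2NBPS() line expands to one endianness-specific pair of output
 * functions; e.g. yuv2NBPS( 9, BE, 1, 10, int16_t) defines
 * yuv2plane1_9BE_c() and yuv2planeX_9BE_c(), which call the *_10_c_template()
 * helpers (shared by all 9-15 bit depths) with is_be = 1 and output_bits = 9. */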

static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i;
    for (i=0; i<dstW; i++) {
        int val = dither[(i + offset) & 7] << 12;
        int j;
        for (j=0; j<filterSize; j++)
            val += src[j][i] * filter[j];

        dest[i]= av_clip_uint8(val>>19);
    }
}
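
/* Bit-depth bookkeeping: 15-bit intermediates times 12-bit filter
 * coefficients give a 27-bit sum, and the final >> 19 brings that back to
 * 8 bits; the dither byte is pre-shifted by 12 so it acts as the per-pixel
 * rounding offset of that shift. */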

static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i;
    for (i=0; i<dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i]= av_clip_uint8(val);
    }
}

static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest, int chrDstW)
{
    enum AVPixelFormat dstFormat = c->dstFormat;
    const uint8_t *chrDither = c->chrDither8;
    int i;

    if (dstFormat == AV_PIX_FMT_NV12)
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            dest[2*i]= av_clip_uint8(u>>19);
            dest[2*i+1]= av_clip_uint8(v>>19);
        }
    else
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            dest[2*i]= av_clip_uint8(v>>19);
            dest[2*i+1]= av_clip_uint8(u>>19);
        }
}

#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= (128 + 110)
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
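
/* accumulate_bit() shifts the previous decisions up and appends one new
 * 1-bit black/white decision, so after eight calls the accumulator holds
 * eight pixels, most significant bit first; the MONOWHITE variant stores
 * the complemented byte. */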

static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y&7];
    int i;
    unsigned acc = 0;

    for (i = 0; i < dstW; i += 2) {
        int j;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i]   * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
        accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
        if ((i & 7) == 6) {
            output_pixel(*dest++, acc);
        }
    }

    if (i & 6)
        output_pixel(*dest, acc);
}

static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum AVPixelFormat target)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1];
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int yalpha1 = 4096 - yalpha;
    int i;

    for (i = 0; i < dstW; i += 8) {
        int Y, acc = 0;

        Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[0]);
        Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[1]);
        Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[2]);
        Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[3]);
        Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[4]);
        Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[5]);
        Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[6]);
        Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
        accumulate_bit(acc, Y + d128[7]);

        output_pixel(*dest++, acc);
    }
}

static av_always_inline void
yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, int y, enum AVPixelFormat target)
{
    const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
    int i;

    for (i = 0; i < dstW; i += 8) {
        int acc = 0;

        accumulate_bit(acc, (buf0[i + 0] >> 7) + d128[0]);
        accumulate_bit(acc, (buf0[i + 1] >> 7) + d128[1]);
        accumulate_bit(acc, (buf0[i + 2] >> 7) + d128[2]);
        accumulate_bit(acc, (buf0[i + 3] >> 7) + d128[3]);
        accumulate_bit(acc, (buf0[i + 4] >> 7) + d128[4]);
        accumulate_bit(acc, (buf0[i + 5] >> 7) + d128[5]);
        accumulate_bit(acc, (buf0[i + 6] >> 7) + d128[6]);
        accumulate_bit(acc, (buf0[i + 7] >> 7) + d128[7]);

        output_pixel(*dest++, acc);
    }
}

#undef output_pixel
#undef accumulate_bit

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}

YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)

#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U;  \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V;  \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V;  \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U;  \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U;  \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V;  \
        dest[pos + 3] = Y2; \
    }
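
/* Byte layout per 2-pixel group: YUYV422 writes Y0 U Y1 V, YVYU422 writes
 * Y0 V Y1 U, and UYVY422 writes U Y0 V Y1. */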

static av_always_inline void
yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum AVPixelFormat target)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }
        output_pixels(4*i, Y1, U, Y2, V);
    }
}

static av_always_inline void
yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int  yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;

        Y1 = av_clip_uint8(Y1);
        Y2 = av_clip_uint8(Y2);
        U  = av_clip_uint8(U);
        V  = av_clip_uint8(V);

        output_pixels(i * 4, Y1, U, Y2, V);
    }
}

static av_always_inline void
yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = ubuf0[i]        >> 7;
            int V  = vbuf0[i]        >> 7;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = (ubuf0[i] + ubuf1[i]) >> 8;
            int V  = (vbuf0[i] + vbuf1[i]) >> 8;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    }
}

#undef output_pixels

YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)

#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R)
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
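
/* R_B/B_R swap the red and blue lookups so one template body serves both
 * RGB48 and BGR48, while output_pixel() selects the big- or little-endian
 * 16-bit store from the target format. */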

static av_always_inline void
yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
                       const int32_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int32_t **chrUSrc,
                       const int32_t **chrVSrc, int chrFilterSize,
                       const int32_t **alpSrc, uint16_t *dest, int dstW,
                       int y, enum AVPixelFormat target)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j;
        int Y1 = -0x40000000;
        int Y2 = -0x40000000;
        int U  = SHIFT_LEFT(-128, 23); // 19
        int V  = SHIFT_LEFT(-128, 23);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        // 8 bits: 12+15=27; 16 bits: 12+19=31
        Y1 >>= 14; // 10
        Y1 += 0x10000;
        Y2 >>= 14;
        Y2 += 0x10000;
        U  >>= 14;
        V  >>= 14;

        // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += 1 << 13; // 21
        Y2 += 1 << 13;
        // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B =                            U * c->yuv2rgb_u2b_coeff;

        // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
        output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
        output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
        output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
        dest += 6;
    }
}

static av_always_inline void
yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
                       const int32_t *ubuf[2], const int32_t *vbuf[2],
                       const int32_t *abuf[2], uint16_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
                       enum AVPixelFormat target)
{
    const int32_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int  yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha) >> 14;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha) >> 14;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + SHIFT_LEFT(-128, 23)) >> 14;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + SHIFT_LEFT(-128, 23)) >> 14;
        int R, G, B;

        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += 1 << 13;
        Y2 += 1 << 13;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B =                            U * c->yuv2rgb_u2b_coeff;

        output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
        output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
        output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
        dest += 6;
    }
}

static av_always_inline void
yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
                       const int32_t *ubuf[2], const int32_t *vbuf[2],
                       const int32_t *abuf0, uint16_t *dest, int dstW,
                       int uvalpha, int y, enum AVPixelFormat target)
{
    const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + SHIFT_LEFT(-128, 11)) >> 2;
            int V  = (vbuf0[i] + SHIFT_LEFT(-128, 11)) >> 2;
            int R, G, B;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;
            Y1 += 1 << 13;
            Y2 += 1 << 13;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B =                            U * c->yuv2rgb_u2b_coeff;

            output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
            output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
            output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
            output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
            dest += 6;
        }
    } else {
        const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + ubuf1[i] + SHIFT_LEFT(-128, 12)) >> 3;
            int V  = (vbuf0[i] + vbuf1[i] + SHIFT_LEFT(-128, 12)) >> 3;
            int R, G, B;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;
            Y1 += 1 << 13;
            Y2 += 1 << 13;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B =                            U * c->yuv2rgb_u2b_coeff;

            output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
            output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
            output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
            output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
            dest += 6;
        }
    }
}

#undef output_pixel
#undef R_B
#undef B_R

#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt); \
}

YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, AV_PIX_FMT_RGB48BE)
YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, AV_PIX_FMT_RGB48LE)
YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, AV_PIX_FMT_BGR48BE)
YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, AV_PIX_FMT_BGR48LE)

/*
 * Write out 2 RGB pixels in the target pixel format. This function takes a
 * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
 * things like endianness conversion and shifting. The caller takes care of
 * setting the correct offset in these tables from the chroma (U/V) values.
 * This function then uses the luminance (Y1/Y2) values to write out the
 * correct RGB values into the destination buffer.
 */
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, unsigned Y1, unsigned Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = dither_2x2_8[ y & 1     ][0];
            dg1 = dither_2x2_4[ y & 1     ][0];
            db1 = dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = dither_2x2_8[ y & 1     ][1];
            dg2 = dither_2x2_4[ y & 1     ][1];
            db2 = dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = dither_2x2_8[ y & 1     ][0];
            dg1 = dither_2x2_8[ y & 1     ][1];
            db1 = dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = dither_2x2_8[ y & 1     ][1];
            dg2 = dither_2x2_8[ y & 1     ][0];
            db2 = dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else /* 8/4 bits */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
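
/* The r[]/g[]/b[] lookups can simply be summed because the LUTs built by
 * ff_yuv2rgb_c_init_tables() already contain each channel shifted into its
 * own bit range of the packed output word. */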

static av_always_inline void
yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum AVPixelFormat target, int hasAlpha)
{
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j, A1, A2;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;
        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }
        if (hasAlpha) {
            A1 = 1 << 18;
            A2 = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2    ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
            }
            A1 >>= 19;
            A2 >>= 19;
            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }
        }

        /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/
        r =  c->table_rV[V];
        g = (c->table_gU[U] + c->table_gV[V]);
        b =  c->table_bU[U];

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}

static av_always_inline void
yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int  yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;
        int A1, A2;
        const void *r, *g, *b;

        Y1 = av_clip_uint8(Y1);
        Y2 = av_clip_uint8(Y2);
        U  = av_clip_uint8(U);
        V  = av_clip_uint8(V);

        r =  c->table_rV[V];
        g = (c->table_gU[U] + c->table_gV[V]);
        b =  c->table_bU[U];

        if (hasAlpha) {
            A1 = (abuf0[i * 2    ] * yalpha1 + abuf1[i * 2    ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);
        }

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}

static av_always_inline void
yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum AVPixelFormat target,
                     int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = ubuf0[i]        >> 7;
            int V  = vbuf0[i]        >> 7;
            int A1, A2;
            const void *r, *g, *b;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

            r =  c->table_rV[V];
            g = (c->table_gU[U] + c->table_gV[V]);
            b =  c->table_bU[U];

            if (hasAlpha) {
                A1 = abuf0[i * 2    ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = (ubuf0[i] + ubuf1[i]) >> 8;
            int V  = (vbuf0[i] + vbuf1[i]) >> 8;
            int A1, A2;
            const void *r, *g, *b;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

            r =  c->table_rV[V];
            g = (c->table_gU[U] + c->table_gV[V]);
            b =  c->table_bU[U];

            if (hasAlpha) {
                A1 = abuf0[i * 2    ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    }
}

#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}

#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2rgb,, 32_1,  AV_PIX_FMT_RGB32_1,  CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
YUV2RGBWRAPPER(yuv2rgb,, 32,    AV_PIX_FMT_RGB32,    CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1,  1)
YUV2RGBWRAPPER(yuv2rgb,, a32,   AV_PIX_FMT_RGB32,    1)
#endif
YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1,  0)
YUV2RGBWRAPPER(yuv2rgb,, x32,   AV_PIX_FMT_RGB32,    0)
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24,   0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24,   0)
YUV2RGBWRAPPER(yuv2rgb,, 16,    AV_PIX_FMT_RGB565,    0)
YUV2RGBWRAPPER(yuv2rgb,, 15,    AV_PIX_FMT_RGB555,    0)
YUV2RGBWRAPPER(yuv2rgb,, 12,    AV_PIX_FMT_RGB444,    0)
YUV2RGBWRAPPER(yuv2rgb,,  8,    AV_PIX_FMT_RGB8,      0)
YUV2RGBWRAPPER(yuv2rgb,,  4,    AV_PIX_FMT_RGB4,      0)
YUV2RGBWRAPPER(yuv2rgb,, 4b,    AV_PIX_FMT_RGB4_BYTE, 0)

static av_always_inline void
yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,
                          int dstW, int y, enum AVPixelFormat target, int hasAlpha)
{
    int i;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 0;
        int U = SHIFT_LEFT(-128, 19);
        int V = SHIFT_LEFT(-128, 19);
        int av_unused A;
        int R, G, B;
        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << 21;
        R = Y + V*c->yuv2rgb_v2r_coeff;
        G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
        B = Y +                          U*c->yuv2rgb_u2b_coeff;
        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        switch (target) {
        case AV_PIX_FMT_ARGB:
            dest[0] = hasAlpha ? A : 255;
            dest[1] = R >> 22;
            dest[2] = G >> 22;
            dest[3] = B >> 22;
            break;
        case AV_PIX_FMT_RGB24:
            dest[0] = R >> 22;
            dest[1] = G >> 22;
            dest[2] = B >> 22;
            break;
        case AV_PIX_FMT_RGBA:
            dest[0] = R >> 22;
            dest[1] = G >> 22;
            dest[2] = B >> 22;
            dest[3] = hasAlpha ? A : 255;
            break;
        case AV_PIX_FMT_ABGR:
            dest[0] = hasAlpha ? A : 255;
            dest[1] = B >> 22;
            dest[2] = G >> 22;
            dest[3] = R >> 22;
            break;
        case AV_PIX_FMT_BGR24:
            dest[0] = B >> 22;
            dest[1] = G >> 22;
            dest[2] = R >> 22;
            break;
        case AV_PIX_FMT_BGRA:
            dest[0] = B >> 22;
            dest[1] = G >> 22;
            dest[2] = R >> 22;
            dest[3] = hasAlpha ? A : 255;
            break;
        }
        dest += step;
    }
}

#if CONFIG_SMALL
YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif /* CONFIG_SWSCALE_ALPHA */
YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif /* !CONFIG_SMALL */

YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)

static void
yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
                  const int16_t **lumSrc, int lumFilterSize,
                  const int16_t *chrFilter, const int16_t **chrUSrc,
                  const int16_t **chrVSrc, int chrFilterSize,
                  const int16_t **alpSrc, uint8_t **dest,
                  int dstW, int y)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->dstFormat);
    int i;
    int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
    uint16_t **dest16 = (uint16_t**)dest;
    int SH = 22 + 8 - desc->comp[0].depth;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 9;
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);
        int R, G, B, A;

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        Y >>= 10;
        U >>= 10;
        V >>= 10;

        if (hasAlpha) {
            A = 1 << 18;

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

            A >>= 19;

            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << 21;
        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y +                            U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        if (SH != 22) {
            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;
            if (hasAlpha)
                dest16[3][i] = A;
        } else {
            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;
            if (hasAlpha)
                dest[3][i] = A;
        }
    }

    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
            dest16[0][i] = av_bswap16(dest16[0][i]);
            dest16[1][i] = av_bswap16(dest16[1][i]);
            dest16[2][i] = av_bswap16(dest16[2][i]);
            if (hasAlpha)
                dest16[3][i] = av_bswap16(dest16[3][i]);
        }
    }
}
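
/* Note the plane order: for the AV_PIX_FMT_GBRP* family the destination
 * planes are G, B, R (dest[0..2]), with the optional alpha plane last. */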

av_cold void ff_sws_init_output_funcs(SwsContext *c,
                                      yuv2planar1_fn *yuv2plane1,
                                      yuv2planarX_fn *yuv2planeX,
                                      yuv2interleavedX_fn *yuv2nv12cX,
                                      yuv2packed1_fn *yuv2packed1,
                                      yuv2packed2_fn *yuv2packed2,
                                      yuv2packedX_fn *yuv2packedX,
                                      yuv2anyX_fn *yuv2anyX)
{
    enum AVPixelFormat dstFormat = c->dstFormat;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);

    if (is16BPS(dstFormat)) {
        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c  : yuv2planeX_16LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c  : yuv2plane1_16LE_c;
    } else if (is9_15BPS(dstFormat)) {
        if (desc->comp[0].depth == 9) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c  : yuv2planeX_9LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c  : yuv2plane1_9LE_c;
        } else if (desc->comp[0].depth == 10) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c  : yuv2planeX_10LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c  : yuv2plane1_10LE_c;
        } else if (desc->comp[0].depth == 12) {
            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c  : yuv2planeX_12LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c  : yuv2plane1_12LE_c;
        }
    } else {
        *yuv2plane1 = yuv2plane1_8_c;
        *yuv2planeX = yuv2planeX_8_c;
        if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)
            *yuv2nv12cX = yuv2nv12cX_c;
    }

    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGBA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2rgba32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2rgba32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2rgbx32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ARGB:
#if CONFIG_SMALL
            *yuv2packedX = yuv2argb32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2argb32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xrgb32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_BGRA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2bgra32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2bgra32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2bgrx32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ABGR:
#if CONFIG_SMALL
            *yuv2packedX = yuv2abgr32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2abgr32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xbgr32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB24:
            *yuv2packedX = yuv2rgb24_full_X_c;
            break;
        case AV_PIX_FMT_BGR24:
            *yuv2packedX = yuv2bgr24_full_X_c;
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRP9BE:
        case AV_PIX_FMT_GBRP9LE:
        case AV_PIX_FMT_GBRP10BE:
        case AV_PIX_FMT_GBRP10LE:
        case AV_PIX_FMT_GBRP12BE:
        case AV_PIX_FMT_GBRP12LE:
        case AV_PIX_FMT_GBRAP10BE:
        case AV_PIX_FMT_GBRAP10LE:
        case AV_PIX_FMT_GBRAP12BE:
        case AV_PIX_FMT_GBRAP12LE:
        case AV_PIX_FMT_GBRP16BE:
        case AV_PIX_FMT_GBRP16LE:
        case AV_PIX_FMT_GBRAP:
            *yuv2anyX = yuv2gbrp_full_X_c;
            break;
        }
    } else {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB48LE:
            *yuv2packed1 = yuv2rgb48le_1_c;
            *yuv2packed2 = yuv2rgb48le_2_c;
            *yuv2packedX = yuv2rgb48le_X_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            *yuv2packed1 = yuv2rgb48be_1_c;
            *yuv2packed2 = yuv2rgb48be_2_c;
            *yuv2packedX = yuv2rgb48be_X_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            *yuv2packed1 = yuv2bgr48le_1_c;
            *yuv2packed2 = yuv2bgr48le_2_c;
            *yuv2packedX = yuv2bgr48le_X_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            *yuv2packed1 = yuv2bgr48be_1_c;
            *yuv2packed2 = yuv2bgr48be_2_c;
            *yuv2packedX = yuv2bgr48be_X_c;
            break;
        case AV_PIX_FMT_RGB32:
        case AV_PIX_FMT_BGR32:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_c;
            *yuv2packed2 = yuv2rgb32_2_c;
            *yuv2packedX = yuv2rgb32_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packed1 = yuv2rgba32_1_c;
                *yuv2packed2 = yuv2rgba32_2_c;
                *yuv2packedX = yuv2rgba32_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_c;
                *yuv2packed2 = yuv2rgbx32_2_c;
                *yuv2packedX = yuv2rgbx32_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB32_1:
        case AV_PIX_FMT_BGR32_1:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_1_c;
            *yuv2packed2 = yuv2rgb32_1_2_c;
            *yuv2packedX = yuv2rgb32_1_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packed1 = yuv2rgba32_1_1_c;
                *yuv2packed2 = yuv2rgba32_1_2_c;
                *yuv2packedX = yuv2rgba32_1_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_1_c;
                *yuv2packed2 = yuv2rgbx32_1_2_c;
                *yuv2packedX = yuv2rgbx32_1_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB24:
            *yuv2packed1 = yuv2rgb24_1_c;
            *yuv2packed2 = yuv2rgb24_2_c;
            *yuv2packedX = yuv2rgb24_X_c;
            break;
        case AV_PIX_FMT_BGR24:
            *yuv2packed1 = yuv2bgr24_1_c;
            *yuv2packed2 = yuv2bgr24_2_c;
            *yuv2packedX = yuv2bgr24_X_c;
            break;
        case AV_PIX_FMT_RGB565LE:
        case AV_PIX_FMT_RGB565BE:
        case AV_PIX_FMT_BGR565LE:
        case AV_PIX_FMT_BGR565BE:
            *yuv2packed1 = yuv2rgb16_1_c;
            *yuv2packed2 = yuv2rgb16_2_c;
            *yuv2packedX = yuv2rgb16_X_c;
            break;
        case AV_PIX_FMT_RGB555LE:
        case AV_PIX_FMT_RGB555BE:
        case AV_PIX_FMT_BGR555LE:
        case AV_PIX_FMT_BGR555BE:
            *yuv2packed1 = yuv2rgb15_1_c;
            *yuv2packed2 = yuv2rgb15_2_c;
            *yuv2packedX = yuv2rgb15_X_c;
            break;
        case AV_PIX_FMT_RGB444LE:
        case AV_PIX_FMT_RGB444BE:
        case AV_PIX_FMT_BGR444LE:
        case AV_PIX_FMT_BGR444BE:
            *yuv2packed1 = yuv2rgb12_1_c;
            *yuv2packed2 = yuv2rgb12_2_c;
            *yuv2packedX = yuv2rgb12_X_c;
            break;
        case AV_PIX_FMT_RGB8:
        case AV_PIX_FMT_BGR8:
            *yuv2packed1 = yuv2rgb8_1_c;
            *yuv2packed2 = yuv2rgb8_2_c;
            *yuv2packedX = yuv2rgb8_X_c;
            break;
        case AV_PIX_FMT_RGB4:
        case AV_PIX_FMT_BGR4:
            *yuv2packed1 = yuv2rgb4_1_c;
            *yuv2packed2 = yuv2rgb4_2_c;
            *yuv2packedX = yuv2rgb4_X_c;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
        case AV_PIX_FMT_BGR4_BYTE:
            *yuv2packed1 = yuv2rgb4b_1_c;
            *yuv2packed2 = yuv2rgb4b_2_c;
            *yuv2packedX = yuv2rgb4b_X_c;
            break;
        }
    }
    switch (dstFormat) {
    case AV_PIX_FMT_MONOWHITE:
        *yuv2packed1 = yuv2monowhite_1_c;
        *yuv2packed2 = yuv2monowhite_2_c;
        *yuv2packedX = yuv2monowhite_X_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        *yuv2packed1 = yuv2monoblack_1_c;
        *yuv2packed2 = yuv2monoblack_2_c;
        *yuv2packedX = yuv2monoblack_X_c;
        break;
    case AV_PIX_FMT_YUYV422:
        *yuv2packed1 = yuv2yuyv422_1_c;
        *yuv2packed2 = yuv2yuyv422_2_c;
        *yuv2packedX = yuv2yuyv422_X_c;
        break;
    case AV_PIX_FMT_YVYU422:
        *yuv2packed1 = yuv2yvyu422_1_c;
        *yuv2packed2 = yuv2yvyu422_2_c;
        *yuv2packedX = yuv2yvyu422_X_c;
        break;
    case AV_PIX_FMT_UYVY422:
        *yuv2packed1 = yuv2uyvy422_1_c;
        *yuv2packed2 = yuv2uyvy422_2_c;
        *yuv2packedX = yuv2uyvy422_X_c;
        break;
    }
}