// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This webpage shows the layout of YV12 and other YUV formats:
// http://www.fourcc.org/yuv.php
// The actual conversion is best described here:
// http://en.wikipedia.org/wiki/YUV
// An article on optimizing YUV conversion using tables instead of multiplies:
// http://lestourtereaux.free.fr/papers/data/yuvrgb.pdf

// YV12 is a full plane of Y and half-height, half-width chroma planes.
// YV16 is a full plane of Y and full-height, half-width chroma planes.

// ARGB pixel format is output, which on little endian is stored as BGRA.
// The alpha is set to 255, allowing the application to use RGBA or RGB32.
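//
// As a rough guide (not the exact fixed-point math used below), the Rec601
// video-range conversion defined by kRec601ConvertMatrix works out to:
//   R = 1.164 * (Y - 16)                     + 1.596 * (V - 128)
//   G = 1.164 * (Y - 16) - 0.391 * (U - 128) - 0.813 * (V - 128)
//   B = 1.164 * (Y - 16) + 2.018 * (U - 128)
// with each channel clamped to [0, 255].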
#include "media/base/yuv_convert.h"

#include "base/cpu.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "build/build_config.h"
#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/simd/convert_yuv_to_rgb.h"
#include "media/base/simd/filter_yuv.h"

#if defined(ARCH_CPU_X86_FAMILY)
#if defined(COMPILER_MSVC)

// Assembly functions are declared without namespace.
extern "C" { void EmptyRegisterState_MMX(); }  // extern "C"
typedef void (*FilterYUVRowsProc)(uint8*, const uint8*, const uint8*,
                                  int, uint8);

typedef void (*ConvertRGBToYUVProc)(const uint8*, uint8*, uint8*, uint8*,
                                    int, int, int, int, int);

typedef void (*ConvertYUVToRGB32Proc)(const uint8*, const uint8*, const uint8*,
                                      uint8*, int, int, int, int, int, YUVType);

typedef void (*ConvertYUVAToARGBProc)(const uint8*, const uint8*, const uint8*,
                                      const uint8*, uint8*, int, int, int, int,
                                      int, int, YUVType);

typedef void (*ConvertYUVToRGB32RowProc)(const uint8*, const uint8*,
                                         const uint8*, uint8*, ptrdiff_t,
                                         const int16*);

typedef void (*ConvertYUVAToARGBRowProc)(const uint8*, const uint8*,
                                         const uint8*, const uint8*, uint8*,
                                         ptrdiff_t, const int16*);

typedef void (*ScaleYUVToRGB32RowProc)(const uint8*, const uint8*,
                                       const uint8*, uint8*, ptrdiff_t,
                                       ptrdiff_t, const int16*);

static FilterYUVRowsProc g_filter_yuv_rows_proc_ = NULL;
static ConvertYUVToRGB32RowProc g_convert_yuv_to_rgb32_row_proc_ = NULL;
static ScaleYUVToRGB32RowProc g_scale_yuv_to_rgb32_row_proc_ = NULL;
static ScaleYUVToRGB32RowProc g_linear_scale_yuv_to_rgb32_row_proc_ = NULL;
static ConvertRGBToYUVProc g_convert_rgb32_to_yuv_proc_ = NULL;
static ConvertRGBToYUVProc g_convert_rgb24_to_yuv_proc_ = NULL;
static ConvertYUVToRGB32Proc g_convert_yuv_to_rgb32_proc_ = NULL;
static ConvertYUVAToARGBProc g_convert_yuva_to_argb_proc_ = NULL;

static const int kYUVToRGBTableSize = 256 * 4 * 4 * sizeof(int16);
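// That is 4 sub-tables (Y, U, V and A) of 256 entries with 4 int16 columns
// each, i.e. 8 KiB per colorspace table.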

// base::AlignedMemory has a private operator new(), so wrap it in a struct so
// that we can put it in a LazyInstance::Leaky.
struct YUVToRGBTableWrapper {
  base::AlignedMemory<kYUVToRGBTableSize, 16> table;
};

typedef base::LazyInstance<YUVToRGBTableWrapper>::Leaky YUVToRGBTable;
static YUVToRGBTable g_table_rec601 = LAZY_INSTANCE_INITIALIZER;
static YUVToRGBTable g_table_jpeg = LAZY_INSTANCE_INITIALIZER;
static YUVToRGBTable g_table_rec709 = LAZY_INSTANCE_INITIALIZER;
static const int16* g_table_rec601_ptr = NULL;
static const int16* g_table_jpeg_ptr = NULL;
static const int16* g_table_rec709_ptr = NULL;

// Empty SIMD registers state after using them.
void EmptyRegisterStateStub() {}
#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
void EmptyRegisterStateIntrinsic() { _mm_empty(); }
#endif
typedef void (*EmptyRegisterStateProc)();
static EmptyRegisterStateProc g_empty_register_state_proc_ = NULL;

// Get the appropriate value to bitshift by for vertical indices.
int GetVerticalShift(YUVType type) {

const int16* GetLookupTable(YUVType type) {
      return g_table_rec601_ptr;
      return g_table_jpeg_ptr;
      return g_table_rec709_ptr;

// Populates a pre-allocated lookup table from a YUV->RGB matrix.
const int16* PopulateYUVToRGBTable(const double matrix[3][3],
                                   bool full_range,
                                   int16* table) {
  // We'll have 4 sub-tables that lie contiguous in memory, one for each of Y,
  // U, V and A.
  const int kNumTables = 4;
  // Each table has 256 rows (for all possible 8-bit values).
  const int kNumRows = 256;
  // Each row has 4 columns, for contributions to each of R, G, B and A.
  const int kNumColumns = 4;
  // Each element is a fixed-point (10.6) 16-bit signed value.
  const int kElementSize = sizeof(int16);

  // Sanity check that our constants here match the size of the statically
  // allocated table.
  static_assert(
      kNumTables * kNumRows * kNumColumns * kElementSize == kYUVToRGBTableSize,
      "YUV lookup table size doesn't match expectation.");
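  // For example, with the Rec601 matrix and video-range offsets below, the
  // entry for the Y = 235 contribution is roughly
  // 1.164 * 64 * (235 - 16) + 0.5 ~= 16315, i.e. about 255 once the summed
  // contributions are shifted back down by the 6 fraction bits of the 10.6
  // format.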

  // Y needs an offset of -16 for color ranges that ignore the lower 16 values,
  // U and V get -128 to put them in [-128, 127] from [0, 255].
  int offsets[3] = {(full_range ? 0 : -16), -128, -128};

  for (int i = 0; i < kNumRows; ++i) {
    // Y, U, and V contributions to each of R, G, B and A.
    for (int j = 0; j < 3; ++j) {
#if defined(OS_ANDROID)
      table[(j * kNumRows + i) * kNumColumns + 0] =
          matrix[j][0] * 64 * (i + offsets[j]) + 0.5;
      table[(j * kNumRows + i) * kNumColumns + 1] =
          matrix[j][1] * 64 * (i + offsets[j]) + 0.5;
      table[(j * kNumRows + i) * kNumColumns + 2] =
          matrix[j][2] * 64 * (i + offsets[j]) + 0.5;
#else
      // Other platforms are BGRA.
      table[(j * kNumRows + i) * kNumColumns + 0] =
          matrix[j][2] * 64 * (i + offsets[j]) + 0.5;
      table[(j * kNumRows + i) * kNumColumns + 1] =
          matrix[j][1] * 64 * (i + offsets[j]) + 0.5;
      table[(j * kNumRows + i) * kNumColumns + 2] =
          matrix[j][0] * 64 * (i + offsets[j]) + 0.5;
#endif
      // Alpha contributions from Y and V are always 0. U is set such that
      // all values result in a full '255' alpha value.
      table[(j * kNumRows + i) * kNumColumns + 3] = (j == 1) ? 256 * 64 - 1 : 0;
    }

    // And YUVA alpha is passed through as-is.
    for (int k = 0; k < kNumTables; ++k)
      table[((kNumTables - 1) * kNumRows + i) * kNumColumns + k] = i;
  }

void InitializeCPUSpecificYUVConversions() {
  CHECK(!g_filter_yuv_rows_proc_);
  CHECK(!g_convert_yuv_to_rgb32_row_proc_);
  CHECK(!g_scale_yuv_to_rgb32_row_proc_);
  CHECK(!g_linear_scale_yuv_to_rgb32_row_proc_);
  CHECK(!g_convert_rgb32_to_yuv_proc_);
  CHECK(!g_convert_rgb24_to_yuv_proc_);
  CHECK(!g_convert_yuv_to_rgb32_proc_);
  CHECK(!g_convert_yuva_to_argb_proc_);
  CHECK(!g_empty_register_state_proc_);

  g_filter_yuv_rows_proc_ = FilterYUVRows_C;
  g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_C;
  g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_C;
  g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_C;
  g_convert_rgb32_to_yuv_proc_ = ConvertRGB32ToYUV_C;
  g_convert_rgb24_to_yuv_proc_ = ConvertRGB24ToYUV_C;
  g_convert_yuv_to_rgb32_proc_ = ConvertYUVToRGB32_C;
  g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_C;
  g_empty_register_state_proc_ = EmptyRegisterStateStub;

  // Assembly code confuses MemorySanitizer. Also not available in iOS builds.
#if defined(ARCH_CPU_X86_FAMILY) && !defined(MEMORY_SANITIZER) && \
    !defined(OS_IOS)
  g_convert_yuva_to_argb_proc_ = ConvertYUVAToARGB_MMX;

#if defined(MEDIA_MMX_INTRINSICS_AVAILABLE)
  g_empty_register_state_proc_ = EmptyRegisterStateIntrinsic;
#else
  g_empty_register_state_proc_ = EmptyRegisterState_MMX;
#endif

  g_convert_yuv_to_rgb32_row_proc_ = ConvertYUVToRGB32Row_SSE;
  g_convert_yuv_to_rgb32_proc_ = ConvertYUVToRGB32_SSE;

  g_filter_yuv_rows_proc_ = FilterYUVRows_SSE2;
  g_convert_rgb32_to_yuv_proc_ = ConvertRGB32ToYUV_SSE2;

#if defined(ARCH_CPU_X86_64)
  g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_SSE2_X64;

  // Technically this should be in the MMX section, but MSVC will optimize out
  // the export of LinearScaleYUVToRGB32Row_MMX, which is required by the unit
  // tests, if that decision can be made at compile time. Since all X64 CPUs
  // have SSE2, we can hack around this by making the selection here.
  g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_MMX_X64;
#else
  g_scale_yuv_to_rgb32_row_proc_ = ScaleYUVToRGB32Row_SSE;
  g_linear_scale_yuv_to_rgb32_row_proc_ = LinearScaleYUVToRGB32Row_SSE;
#endif

  base::CPU cpu;
  if (cpu.has_ssse3()) {
    g_convert_rgb24_to_yuv_proc_ = &ConvertRGB24ToYUV_SSSE3;

    // TODO(hclam): Add ConvertRGB32ToYUV_SSSE3 when the cyan problem is solved.
    // See: crbug.com/100462
  }
#endif

  // Initialize YUV conversion lookup tables.

  // SD Rec601 YUV->RGB matrix, see http://www.fourcc.org/fccyvrgb.php
  const double kRec601ConvertMatrix[3][3] = {
      {1.164, 1.164, 1.164}, {0.0, -0.391, 2.018}, {1.596, -0.813, 0.0},
  };

  // JPEG table, values from above link.
  const double kJPEGConvertMatrix[3][3] = {
      {1.0, 1.0, 1.0}, {0.0, -0.34414, 1.772}, {1.402, -0.71414, 0.0},
  };

  // Rec709 "HD" color space, values from:
  // http://www.equasys.de/colorconversion.html
  const double kRec709ConvertMatrix[3][3] = {
      {1.164, 1.164, 1.164}, {0.0, -0.213, 2.112}, {1.793, -0.533, 0.0},
  };

  PopulateYUVToRGBTable(kRec601ConvertMatrix, false,
                        g_table_rec601.Get().table.data_as<int16>());
  PopulateYUVToRGBTable(kJPEGConvertMatrix, true,
                        g_table_jpeg.Get().table.data_as<int16>());
  PopulateYUVToRGBTable(kRec709ConvertMatrix, false,
                        g_table_rec709.Get().table.data_as<int16>());
  g_table_rec601_ptr = g_table_rec601.Get().table.data_as<int16>();
  g_table_rec709_ptr = g_table_rec709.Get().table.data_as<int16>();
  g_table_jpeg_ptr = g_table_jpeg.Get().table.data_as<int16>();
}

// Empty SIMD registers state after using them.
void EmptyRegisterState() { g_empty_register_state_proc_(); }
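// On x86 this matters because the MMX state aliases the x87 floating point
// register stack: after the MMX-based converters run, the emms issued here
// (via EmptyRegisterState_MMX or _mm_empty()) must execute before other
// floating point code uses those registers again.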

// 16.16 fixed point arithmetic
const int kFractionBits = 16;
const int kFractionMax = 1 << kFractionBits;
const int kFractionMask = ((1 << kFractionBits) - 1);
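// For example, halving a 1280-pixel-wide source row to 640 output pixels in
// ScaleYUVToRGB32 below gives source_dx = 1280 * kFractionMax / 640 =
// 2 << kFractionBits: the source x position advances two pixels per output
// pixel, with the integer pixel taken from accum >> kFractionBits and the
// remainder from accum & kFractionMask.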

// Scale a frame of YUV to 32 bit ARGB.
void ScaleYUVToRGB32(const uint8* y_buf, const uint8* u_buf,
                     const uint8* v_buf, uint8* rgb_buf, int source_width,
                     int source_height, int width, int height, int y_pitch,
                     int uv_pitch, int rgb_pitch, YUVType yuv_type,
                     Rotate view_rotate, ScaleFilter filter) {
  // Handle zero sized sources and destinations.
  if ((yuv_type == YV12 && (source_width < 2 || source_height < 2)) ||
      (yuv_type == YV16 && (source_width < 2 || source_height < 1)) ||
      width == 0 || height == 0)
    return;

  const int16* lookup_table = GetLookupTable(yuv_type);

  // 4096 allows 3 buffers to fit in 12k.
  // Helps performance on CPU with 16K L1 cache.
  // Large enough for 3840x2160 and 30" displays which are 2560x1600.
  const int kFilterBufferSize = 4096;
  // Disable filtering if the screen is too big (to avoid buffer overflows).
  // This should never happen to regular users: they don't have monitors
  // wider than 4096 pixels.
  // TODO(fbarchard): Allow rotated videos to filter.
  if (source_width > kFilterBufferSize || view_rotate)
    filter = FILTER_NONE;

  unsigned int y_shift = GetVerticalShift(yuv_type);

  // Diagram showing origin and direction of source sampling.

  // Rotations that start at right side of image.
  if ((view_rotate == ROTATE_180) || (view_rotate == ROTATE_270) ||
      (view_rotate == MIRROR_ROTATE_0) || (view_rotate == MIRROR_ROTATE_90)) {
    y_buf += source_width - 1;
    u_buf += source_width / 2 - 1;
    v_buf += source_width / 2 - 1;
    source_width = -source_width;
  }

  // Rotations that start at bottom of image.
  if ((view_rotate == ROTATE_90) || (view_rotate == ROTATE_180) ||
      (view_rotate == MIRROR_ROTATE_90) || (view_rotate == MIRROR_ROTATE_180)) {
    y_buf += (source_height - 1) * y_pitch;
    u_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    v_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    source_height = -source_height;
  }

  int source_dx = source_width * kFractionMax / width;

  if ((view_rotate == ROTATE_90) || (view_rotate == ROTATE_270)) {
    source_height = source_width;
    int source_dy = source_height * kFractionMax / height;
    source_dx = ((source_dy >> kFractionBits) * y_pitch) << kFractionBits;
    if (view_rotate == ROTATE_90) {
      source_height = -source_height;

  // Need padding because FilterRows() will write 1 to 16 extra pixels
  // after the end for SSE2 version.
  uint8 yuvbuf[16 + kFilterBufferSize * 3 + 16];
  uint8* ybuf =
      reinterpret_cast<uint8*>(reinterpret_cast<uintptr_t>(yuvbuf + 15) & ~15);
  uint8* ubuf = ybuf + kFilterBufferSize;
  uint8* vbuf = ubuf + kFilterBufferSize;

  // TODO(fbarchard): Fixed point math is off by 1 on negatives.

  // We take a y-coordinate in [0,1] space in the source image space, and
  // transform to a y-coordinate in [0,1] space in the destination image space.
  // Note that the coordinate endpoints lie on pixel boundaries, not on pixel
  // centers: e.g. a two-pixel-high image will have pixel centers at 0.25 and
  // 0.75. The formula is as follows (in fixed-point arithmetic):
  //   y_dst = dst_height * ((y_src + 0.5) / src_height)
  //   dst_pixel = clamp([0, dst_height - 1], floor(y_dst - 0.5))
  // Implement this here as an accumulator + delta, to avoid expensive math
  // in the loop.
  int source_y_subpixel_accum =
      ((kFractionMax / 2) * source_height) / height - (kFractionMax / 2);
  int source_y_subpixel_delta = ((1 << kFractionBits) * source_height) / height;
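
  // For example, doubling a 2-row-high source into a 4-row destination gives
  // accum = -kFractionMax / 4 and delta = kFractionMax / 2, so the four output
  // rows sample source y positions -0.25, 0.25, 0.75 and 1.25; the clamp below
  // then limits them to [0, source_height - 1].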

  // TODO(fbarchard): Split this into separate function for better efficiency.
  for (int y = 0; y < height; ++y) {
    uint8* dest_pixel = rgb_buf + y * rgb_pitch;
    int source_y_subpixel = source_y_subpixel_accum;
    source_y_subpixel_accum += source_y_subpixel_delta;
    if (source_y_subpixel < 0)
      source_y_subpixel = 0;
    else if (source_y_subpixel > ((source_height - 1) << kFractionBits))
      source_y_subpixel = (source_height - 1) << kFractionBits;

    const uint8* y_ptr = NULL;
    const uint8* u_ptr = NULL;
    const uint8* v_ptr = NULL;
    // Apply vertical filtering if necessary.
    // TODO(fbarchard): Remove memcpy when not necessary.
    if (filter & media::FILTER_BILINEAR_V) {
      int source_y = source_y_subpixel >> kFractionBits;
      y_ptr = y_buf + source_y * y_pitch;
      u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
      v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;

      // Vertical scaler uses 16.8 fixed point.
      uint8 source_y_fraction = (source_y_subpixel & kFractionMask) >> 8;
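      // e.g. a subpixel position exactly halfway between two source rows gives
      // source_y_fraction == 128, asking the filter proc for a roughly even
      // blend of y_ptr and the row after it; 0 means the upper row is used
      // unchanged, which is why it is special-cased with a memcpy below.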
      if (source_y_fraction != 0) {
        g_filter_yuv_rows_proc_(
            ybuf, y_ptr, y_ptr + y_pitch, source_width, source_y_fraction);
      } else {
        memcpy(ybuf, y_ptr, source_width);
      }
      ybuf[source_width] = ybuf[source_width - 1];

      int uv_source_width = (source_width + 1) / 2;
      uint8 source_uv_fraction;

      // For formats with half-height UV planes, each even-numbered pixel row
      // should not interpolate, since the next row to interpolate from should
      // be a duplicate of the current row.
      if (y_shift && (source_y & 0x1) == 0)
        source_uv_fraction = 0;
      else
        source_uv_fraction = source_y_fraction;
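      // For YV12 (y_shift == 1) luma rows 2k and 2k+1 share chroma row k, so
      // only the odd luma row blends toward chroma row k + 1; blending from an
      // even row would pull in chroma belonging to the next pair of rows.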

      if (source_uv_fraction != 0) {
        g_filter_yuv_rows_proc_(
            ubuf, u_ptr, u_ptr + uv_pitch, uv_source_width, source_uv_fraction);
        g_filter_yuv_rows_proc_(
            vbuf, v_ptr, v_ptr + uv_pitch, uv_source_width, source_uv_fraction);
      } else {
        memcpy(ubuf, u_ptr, uv_source_width);
        memcpy(vbuf, v_ptr, uv_source_width);
      }
      ubuf[uv_source_width] = ubuf[uv_source_width - 1];
      vbuf[uv_source_width] = vbuf[uv_source_width - 1];
    } else {
      // Offset by 1/2 pixel for center sampling.
      int source_y = (source_y_subpixel + (kFractionMax / 2)) >> kFractionBits;
      y_ptr = y_buf + source_y * y_pitch;
      u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
      v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
    }

    if (source_dx == kFractionMax) {  // Not scaled
      g_convert_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width,
                                       lookup_table);
    } else {
      if (filter & FILTER_BILINEAR_H) {
        g_linear_scale_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel,
                                              width, source_dx, lookup_table);
      } else {
        g_scale_yuv_to_rgb32_row_proc_(y_ptr, u_ptr, v_ptr, dest_pixel, width,
                                       source_dx, lookup_table);
      }
    }
  }

  g_empty_register_state_proc_();
}

// Scale a frame of YV12 to 32 bit ARGB for a specific rectangle.
void ScaleYUVToRGB32WithRect(const uint8* y_buf, const uint8* u_buf,
                             const uint8* v_buf, uint8* rgb_buf,
                             int source_width, int source_height,
                             int dest_width, int dest_height,
                             int dest_rect_left, int dest_rect_top,
                             int dest_rect_right, int dest_rect_bottom,
                             int y_pitch, int uv_pitch, int rgb_pitch) {
  // This routine doesn't currently support up-scaling.
  CHECK_LE(dest_width, source_width);
  CHECK_LE(dest_height, source_height);

  // Sanity-check the destination rectangle.
  DCHECK(dest_rect_left >= 0 && dest_rect_right <= dest_width);
  DCHECK(dest_rect_top >= 0 && dest_rect_bottom <= dest_height);
  DCHECK(dest_rect_right > dest_rect_left);
  DCHECK(dest_rect_bottom > dest_rect_top);

  const int16* lookup_table = GetLookupTable(YV12);

  // Fixed-point value of vertical and horizontal scale down factor.
  // Values are in the format 16.16.
  int y_step = kFractionMax * source_height / dest_height;
  int x_step = kFractionMax * source_width / dest_width;

  // Determine the coordinates of the rectangle in 16.16 coords.
  // NB: Our origin is the *center* of the top/left pixel, NOT its top/left.
  // If we're down-scaling by more than a factor of two, we start with a 50%
  // fraction to avoid degenerating to point-sampling - we should really just
  // fix the fraction at 50% for all pixels in that case.
  int source_left = dest_rect_left * x_step;
  int source_right = (dest_rect_right - 1) * x_step;
  if (x_step < kFractionMax * 2) {
    source_left += ((x_step - kFractionMax) / 2);
    source_right += ((x_step - kFractionMax) / 2);
  } else {
    source_left += kFractionMax / 2;
    source_right += kFractionMax / 2;
  }
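  // Mapping a destination pixel center back into the source gives
  //   source_x = (dest_x + 0.5) * x_step - 0.5
  //            = dest_x * x_step + (x_step - kFractionMax) / 2   (in 16.16),
  // which is the adjustment applied above for scale factors below two.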

  int source_top = dest_rect_top * y_step;
  if (y_step < kFractionMax * 2) {
    source_top += ((y_step - kFractionMax) / 2);
  } else {
    source_top += kFractionMax / 2;
  }

  // Determine the parts of the Y, U and V buffers to interpolate.
  int source_y_left = source_left >> kFractionBits;
  int source_y_right =
      std::min((source_right >> kFractionBits) + 2, source_width + 1);

  int source_uv_left = source_y_left / 2;
  int source_uv_right = std::min((source_right >> (kFractionBits + 1)) + 2,
                                 (source_width + 1) / 2);

  int source_y_width = source_y_right - source_y_left;
  int source_uv_width = source_uv_right - source_uv_left;

  // Determine number of pixels in each output row.
  int dest_rect_width = dest_rect_right - dest_rect_left;

  // Intermediate buffer for vertical interpolation.
  // 4096 bytes allows 3 buffers to fit in 12k, which fits in a 16K L1 cache,
  // and is bigger than most users will generally need.
  // The buffer is 16-byte aligned and padded with 16 extra bytes; some of the
  // FilterYUVRowsProcs have alignment requirements, and the SSE version can
  // write up to 16 bytes past the end of the buffer.
  const int kFilterBufferSize = 4096;
  const bool kAvoidUsingOptimizedFilter = source_width > kFilterBufferSize;
  uint8 yuv_temp[16 + kFilterBufferSize * 3 + 16];
  // memset() yuv_temp to 0 to avoid bogus warnings when running on Valgrind.
  if (RunningOnValgrind())
    memset(yuv_temp, 0, sizeof(yuv_temp));
  uint8* y_temp = reinterpret_cast<uint8*>(
      reinterpret_cast<uintptr_t>(yuv_temp + 15) & ~15);
  uint8* u_temp = y_temp + kFilterBufferSize;
  uint8* v_temp = u_temp + kFilterBufferSize;

  // Move to the top-left pixel of output.
  rgb_buf += dest_rect_top * rgb_pitch;
  rgb_buf += dest_rect_left * 4;

  // For each destination row perform interpolation and color space
  // conversion to produce the output.
  for (int row = dest_rect_top; row < dest_rect_bottom; ++row) {
    // Round the fixed-point y position to get the current row.
    int source_row = source_top >> kFractionBits;
    int source_uv_row = source_row / 2;
    DCHECK(source_row < source_height);

    // Locate the first row for each plane for interpolation.
    const uint8* y0_ptr = y_buf + y_pitch * source_row + source_y_left;
    const uint8* u0_ptr = u_buf + uv_pitch * source_uv_row + source_uv_left;
    const uint8* v0_ptr = v_buf + uv_pitch * source_uv_row + source_uv_left;
    const uint8* y1_ptr = NULL;
    const uint8* u1_ptr = NULL;
    const uint8* v1_ptr = NULL;

    // Locate the second row for interpolation, being careful not to overrun.
    if (source_row + 1 >= source_height) {
      y1_ptr = y0_ptr;
    } else {
      y1_ptr = y0_ptr + y_pitch;
    }
    if (source_uv_row + 1 >= (source_height + 1) / 2) {
      u1_ptr = u0_ptr;
      v1_ptr = v0_ptr;
    } else {
      u1_ptr = u0_ptr + uv_pitch;
      v1_ptr = v0_ptr + uv_pitch;
    }

    if (!kAvoidUsingOptimizedFilter) {
      // Vertical scaler uses 16.8 fixed point.
      uint8 fraction = (source_top & kFractionMask) >> 8;
      g_filter_yuv_rows_proc_(
          y_temp + source_y_left, y0_ptr, y1_ptr, source_y_width, fraction);
      g_filter_yuv_rows_proc_(
          u_temp + source_uv_left, u0_ptr, u1_ptr, source_uv_width, fraction);
      g_filter_yuv_rows_proc_(
          v_temp + source_uv_left, v0_ptr, v1_ptr, source_uv_width, fraction);

      // Perform horizontal interpolation and color space conversion.
      // TODO(hclam): Use the MMX version after more testing.
      LinearScaleYUVToRGB32RowWithRange_C(y_temp, u_temp, v_temp, rgb_buf,
                                          dest_rect_width, source_left, x_step,
                                          lookup_table);
    } else {
      // If the frame is too large then we linear scale a single row.
      LinearScaleYUVToRGB32RowWithRange_C(y0_ptr, u0_ptr, v0_ptr, rgb_buf,
                                          dest_rect_width, source_left, x_step,
                                          lookup_table);
    }

    // Advance vertically in the source and destination image.
    source_top += y_step;
    rgb_buf += rgb_pitch;
  }

  g_empty_register_state_proc_();
}

void ConvertRGB32ToYUV(const uint8* rgbframe, uint8* yplane, uint8* uplane,
                       uint8* vplane, int width, int height, int rgbstride,
                       int ystride, int uvstride) {
  g_convert_rgb32_to_yuv_proc_(rgbframe, yplane, uplane, vplane, width, height,
                               rgbstride, ystride, uvstride);
}

void ConvertRGB24ToYUV(const uint8* rgbframe, uint8* yplane, uint8* uplane,
                       uint8* vplane, int width, int height, int rgbstride,
                       int ystride, int uvstride) {
  g_convert_rgb24_to_yuv_proc_(rgbframe, yplane, uplane, vplane, width, height,
                               rgbstride, ystride, uvstride);
}

void ConvertYUY2ToYUV(const uint8* src, uint8* yplane, uint8* uplane,
                      uint8* vplane, int width, int height) {
  for (int i = 0; i < height / 2; ++i) {
    for (int j = 0; j < (width / 2); ++j) {
    for (int j = 0; j < (width / 2); ++j) {

void ConvertNV21ToYUV(const uint8* src, uint8* yplane, uint8* uplane,
                      uint8* vplane, int width, int height) {
  int y_plane_size = width * height;
  memcpy(yplane, src, y_plane_size);

  int u_plane_size = y_plane_size >> 2;
  for (int i = 0; i < u_plane_size; ++i) {

void ConvertYUVToRGB32(const uint8* yplane, const uint8* uplane,
                       const uint8* vplane, uint8* rgbframe, int width,
                       int height, int ystride, int uvstride, int rgbstride,
                       YUVType yuv_type) {
  g_convert_yuv_to_rgb32_proc_(yplane, uplane, vplane, rgbframe, width, height,
                               ystride, uvstride, rgbstride, yuv_type);
}

void ConvertYUVAToARGB(const uint8* yplane, const uint8* uplane,
                       const uint8* vplane, const uint8* aplane,
                       uint8* argbframe, int width, int height, int ystride,
                       int uvstride, int astride, int argbstride,
                       YUVType yuv_type) {
  g_convert_yuva_to_argb_proc_(yplane, uplane, vplane, aplane, argbframe,
                               width, height, ystride, uvstride, astride,
                               argbstride, yuv_type);
}