// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebPPicture utils for colorspace conversion
//
// Author: Skal (pascal.massimino@gmail.com)

#include <assert.h>
#include <stdlib.h>
#include <math.h>

#include "./vp8enci.h"
#include "../utils/random.h"
#include "../utils/utils.h"
#include "../dsp/yuv.h"

// Comment out to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION

// If defined, use table to compute x / alpha.
#define USE_INVERSE_ALPHA_TABLE

static const union {
  uint32_t argb;
  uint8_t bytes[4];
} test_endian = { 0xff000000u };
#define ALPHA_IS_LAST (test_endian.bytes[3] == 0xff)
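// Note: test_endian.argb is 0xff000000u, so on little-endian hosts the
// most-significant (alpha) byte lands in bytes[3] and ALPHA_IS_LAST is true
// (in-memory byte order B,G,R,A); on big-endian hosts it is false (A,R,G,B).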
static WEBP_INLINE uint32_t MakeARGB32(int a, int r, int g, int b) {
  return (((uint32_t)a << 24) | (r << 16) | (g << 8) | b);
}

//------------------------------------------------------------------------------
// Detection of non-trivial transparency

// Returns true if alpha[] has non-0xff values.
static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
                          int x_step, int y_step) {
  if (alpha == NULL) return 0;
  while (height-- > 0) {
    int x;
    for (x = 0; x < width * x_step; x += x_step) {
      if (alpha[x] != 0xff) return 1;  // TODO(skal): check 4/8 bytes at a time.
    }
    alpha += y_step;
  }
  return 0;
}

// Checking for the presence of non-opaque alpha.
int WebPPictureHasTransparency(const WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (!picture->use_argb) {
    return CheckNonOpaque(picture->a, picture->width, picture->height,
                          1, picture->a_stride);
  } else {
    int x, y;
    const uint32_t* argb = picture->argb;
    if (argb == NULL) return 0;
    for (y = 0; y < picture->height; ++y) {
      for (x = 0; x < picture->width; ++x) {
        if (argb[x] < 0xff000000u) return 1;  // test any alpha values != 0xff
      }
      argb += picture->argb_stride;
    }
  }
  return 0;
}

//------------------------------------------------------------------------------
// Code for gamma correction

#if defined(USE_GAMMA_COMPRESSION)

// gamma-compensates loss of resolution during chroma subsampling
#define kGamma 0.80       // for now we use a different gamma value than kGammaF
#define kGammaFix 12      // fixed-point precision for linear values
#define kGammaScale ((1 << kGammaFix) - 1)
#define kGammaTabFix 7    // fixed-point fractional bits precision
#define kGammaTabScale (1 << kGammaTabFix)
#define kGammaTabRounder (kGammaTabScale >> 1)
#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
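// Note: with kGammaFix == 12 and kGammaTabFix == 7, kGammaScale is 4095,
// kGammaTabScale is 128 and kGammaTabSize is 1 << (12 - 7) == 32. The
// linear-to-gamma table below stores kGammaTabSize + 1 entries so that
// Interpolate() can always read index tab_pos + 1.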
static int kLinearToGammaTab[kGammaTabSize + 1];
static uint16_t kGammaToLinearTab[256];
static int kGammaTablesOk = 0;

static void InitGammaTables(void) {
  if (!kGammaTablesOk) {
    int v;
    const double scale = (double)(1 << kGammaTabFix) / kGammaScale;
    const double norm = 1. / 255.;
    for (v = 0; v <= 255; ++v) {
      kGammaToLinearTab[v] =
          (uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5);
    }
    for (v = 0; v <= kGammaTabSize; ++v) {
      kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5);
    }
    kGammaTablesOk = 1;
  }
}

static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
  return kGammaToLinearTab[v];
}

static WEBP_INLINE int Interpolate(int v) {
  const int tab_pos = v >> (kGammaTabFix + 2);    // integer part
  const int x = v & ((kGammaTabScale << 2) - 1);  // fractional part
  const int v0 = kLinearToGammaTab[tab_pos];
  const int v1 = kLinearToGammaTab[tab_pos + 1];
  const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x);  // interpolate
  assert(tab_pos + 1 < kGammaTabSize + 1);
  return y;
}

// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  const int y = Interpolate(base_value << shift);  // final uplifted value
  return (y + kGammaTabRounder) >> kGammaTabFix;   // descale
}
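// Note on precision: callers pass LinearToGamma() a linear value on a 4x
// scale (a four-sample sum, a two-sample sum shifted by 1, or an
// alpha-weighted average multiplied by 4), so Interpolate() indexes the table
// with two extra bits; descaling by kGammaTabFix only then leaves a result
// with two fractional bits, i.e. the 'YUV_FIX + 2' precision mentioned above.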
#else

static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  return (int)(base_value << shift);
}

#endif  // USE_GAMMA_COMPRESSION

//------------------------------------------------------------------------------
// RGB -> YUV conversion

static int RGBToY(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToY(r, g, b, YUV_HALF)
                      : VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}

static int RGBToU(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToU(r, g, b, YUV_HALF << 2)
                      : VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

static int RGBToV(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToV(r, g, b, YUV_HALF << 2)
                      : VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

//------------------------------------------------------------------------------
// Smart RGB->YUV conversion

static const int kNumIterations = 6;
static const int kMinDimensionIterativeConversion = 4;

// We use, a priori, a different precision for storing the RGB and the Y/W
// components. We could use YFIX=0 and only uint8_t for fixed_y_t, but it
// sometimes produces banding. Better to use extra precision.
// TODO(skal): cleanup once TFIX/YFIX values are fixed.

typedef int16_t fixed_t;     // signed type with extra TFIX precision for UV
typedef uint16_t fixed_y_t;  // unsigned type with extra YFIX precision for W
#define TFIX 6   // fixed-point precision of RGB
#define YFIX 2   // fixed point precision for Y/W

#define THALF ((1 << TFIX) >> 1)
#define MAX_Y_T ((256 << YFIX) - 1)
#define TROUNDER (1 << (YUV_FIX + TFIX - 1))
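// Note: fixed_t holds signed R/G/B-minus-gray deltas with TFIX (6) fractional
// bits, while fixed_y_t holds luma/W values with YFIX (2) fractional bits,
// hence MAX_Y_T == (256 << 2) - 1 == 1023. TROUNDER is the rounding constant
// for the (YUV_FIX + TFIX)-bit right-shift done in ConvertRGBToY/U/V() below.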
#if defined(USE_GAMMA_COMPRESSION)

// float variant of gamma-correction
// We use tables of different size and precision, along with a 'real-world'
// Gamma value close to ~2.
#define kGammaF 2.2
static float kGammaToLinearTabF[MAX_Y_T + 1];  // size scales with YFIX
static float kLinearToGammaTabF[kGammaTabSize + 2];
static int kGammaTablesFOk = 0;

static void InitGammaTablesF(void) {
  if (!kGammaTablesFOk) {
    int v;
    const double norm = 1. / MAX_Y_T;
    const double scale = 1. / kGammaTabSize;
    for (v = 0; v <= MAX_Y_T; ++v) {
      kGammaToLinearTabF[v] = (float)pow(norm * v, kGammaF);
    }
    for (v = 0; v <= kGammaTabSize; ++v) {
      kLinearToGammaTabF[v] = (float)(MAX_Y_T * pow(scale * v, 1. / kGammaF));
    }
    // to prevent small rounding errors from causing a read overflow:
    kLinearToGammaTabF[kGammaTabSize + 1] = kLinearToGammaTabF[kGammaTabSize];
    kGammaTablesFOk = 1;
  }
}

static WEBP_INLINE float GammaToLinearF(int v) {
  return kGammaToLinearTabF[v];
}

static WEBP_INLINE float LinearToGammaF(float value) {
  const float v = value * kGammaTabSize;
  const int tab_pos = (int)v;
  const float x = v - (float)tab_pos;  // fractional part
  const float v0 = kLinearToGammaTabF[tab_pos + 0];
  const float v1 = kLinearToGammaTabF[tab_pos + 1];
  const float y = v1 * x + v0 * (1.f - x);  // interpolate
  return y;
}

#else

static void InitGammaTablesF(void) {}
static WEBP_INLINE float GammaToLinearF(int v) {
  const float norm = 1.f / MAX_Y_T;
  return norm * v;
}
static WEBP_INLINE float LinearToGammaF(float value) {
  return MAX_Y_T * value;
}

#endif  // USE_GAMMA_COMPRESSION

//------------------------------------------------------------------------------

// precision: YFIX -> TFIX
static WEBP_INLINE int FixedYToW(int v) {
#if TFIX == YFIX
  return v;
#elif TFIX >= YFIX
  return v << (TFIX - YFIX);
#else
  return v >> (YFIX - TFIX);
#endif
}

static WEBP_INLINE int FixedWToY(int v) {
#if TFIX == YFIX
  return v;
#elif YFIX >= TFIX
  return v << (YFIX - TFIX);
#else
  return v >> (TFIX - YFIX);
#endif
}

static uint8_t clip_8b(fixed_t v) {
  return (!(v & ~0xff)) ? (uint8_t)v : (v < 0) ? 0u : 255u;
}

static fixed_y_t clip_y(int y) {
  return (!(y & ~MAX_Y_T)) ? (fixed_y_t)y : (y < 0) ? 0 : MAX_Y_T;
}

// precision: TFIX -> YFIX
static fixed_y_t clip_fixed_t(fixed_t v) {
  const int y = FixedWToY(v);
  const fixed_y_t w = clip_y(y);
  return w;
}

//------------------------------------------------------------------------------

static int RGBToGray(int r, int g, int b) {
  const int luma = 19595 * r + 38470 * g + 7471 * b + YUV_HALF;
  return (luma >> YUV_FIX);
}

static float RGBToGrayF(float r, float g, float b) {
  return 0.299f * r + 0.587f * g + 0.114f * b;
}

static float ScaleDown(int a, int b, int c, int d) {
  const float A = GammaToLinearF(a);
  const float B = GammaToLinearF(b);
  const float C = GammaToLinearF(c);
  const float D = GammaToLinearF(d);
  return LinearToGammaF(0.25f * (A + B + C + D));
}
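// Note: ScaleDown() averages a 2x2 block in the linear-light domain
// (gamma-decode, average, gamma-re-encode); averaging the gamma-encoded
// samples directly would bias the downsampled value on high-contrast edges.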
static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int len) {
  while (len-- > 0) {
    const float R = GammaToLinearF(src[0]);
    const float G = GammaToLinearF(src[1]);
    const float B = GammaToLinearF(src[2]);
    const float Y = RGBToGrayF(R, G, B);
    *dst++ = (fixed_y_t)(LinearToGammaF(Y) + .5);
    src += 3;
  }
}

static WEBP_INLINE void UpdateChroma(const fixed_y_t* src1,
                                     const fixed_y_t* src2,
                                     fixed_t* dst, fixed_y_t* tmp, int len) {
  while (len-- > 0) {
    const float r = ScaleDown(src1[0], src1[3], src2[0], src2[3]);
    const float g = ScaleDown(src1[1], src1[4], src2[1], src2[4]);
    const float b = ScaleDown(src1[2], src1[5], src2[2], src2[5]);
    const float W = RGBToGrayF(r, g, b);
    dst[0] = (fixed_t)FixedYToW((int)(r - W));
    dst[1] = (fixed_t)FixedYToW((int)(g - W));
    dst[2] = (fixed_t)FixedYToW((int)(b - W));
    dst += 3;
    src1 += 6;
    src2 += 6;
    if (tmp != NULL) {
      tmp[0] = tmp[1] = clip_y((int)(W + .5));
      tmp += 2;
    }
  }
}

//------------------------------------------------------------------------------

static WEBP_INLINE int Filter(const fixed_t* const A, const fixed_t* const B,
                              int rightwise) {
  int v;
  if (!rightwise) {
    v = (A[0] * 9 + A[-3] * 3 + B[0] * 3 + B[-3]);
  } else {
    v = (A[0] * 9 + A[+3] * 3 + B[0] * 3 + B[+3]);
  }
  return (v + 8) >> 4;
}

static WEBP_INLINE int Filter2(int A, int B) { return (A * 3 + B + 2) >> 2; }
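// Note: Filter() uses the (9 + 3 + 3 + 1) / 16 weights, i.e. bilinear
// interpolation of the U/V plane at a quarter-pixel offset between the
// current and the neighboring row ('rightwise' selects which horizontal
// neighbor); Filter2() is the one-dimensional (3 + 1) / 4 variant used at
// the first and last columns.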
//------------------------------------------------------------------------------

// 8bit -> YFIX
static WEBP_INLINE fixed_y_t UpLift(uint8_t a) {
  return ((fixed_y_t)a << YFIX) | (1 << (YFIX - 1));
}

static void ImportOneRow(const uint8_t* const r_ptr,
                         const uint8_t* const g_ptr,
                         const uint8_t* const b_ptr,
                         int step,
                         int pic_width,
                         fixed_y_t* const dst) {
  int i;
  for (i = 0; i < pic_width; ++i) {
    const int off = i * step;
    dst[3 * i + 0] = UpLift(r_ptr[off]);
    dst[3 * i + 1] = UpLift(g_ptr[off]);
    dst[3 * i + 2] = UpLift(b_ptr[off]);
  }
  if (pic_width & 1) {  // replicate rightmost pixel
    memcpy(dst + 3 * pic_width, dst + 3 * (pic_width - 1), 3 * sizeof(*dst));
  }
}

static void InterpolateTwoRows(const fixed_y_t* const best_y,
                               const fixed_t* const prev_uv,
                               const fixed_t* const cur_uv,
                               const fixed_t* const next_uv,
                               int w,
                               fixed_y_t* const out1,
                               fixed_y_t* const out2) {
  int i, k;
  {  // special boundary case for i==0
    const int W0 = FixedYToW(best_y[0]);
    const int W1 = FixedYToW(best_y[w]);
    for (k = 0; k <= 2; ++k) {
      out1[k] = clip_fixed_t(Filter2(cur_uv[k], prev_uv[k]) + W0);
      out2[k] = clip_fixed_t(Filter2(cur_uv[k], next_uv[k]) + W1);
    }
  }
  for (i = 1; i < w - 1; ++i) {
    const int W0 = FixedYToW(best_y[i + 0]);
    const int W1 = FixedYToW(best_y[i + w]);
    const int off = 3 * (i >> 1);
    for (k = 0; k <= 2; ++k) {
      const int tmp0 = Filter(cur_uv + off + k, prev_uv + off + k, i & 1);
      const int tmp1 = Filter(cur_uv + off + k, next_uv + off + k, i & 1);
      out1[3 * i + k] = clip_fixed_t(tmp0 + W0);
      out2[3 * i + k] = clip_fixed_t(tmp1 + W1);
    }
  }
  {  // special boundary case for i == w - 1
    const int W0 = FixedYToW(best_y[i + 0]);
    const int W1 = FixedYToW(best_y[i + w]);
    const int off = 3 * (i >> 1);
    for (k = 0; k <= 2; ++k) {
      out1[3 * i + k] =
          clip_fixed_t(Filter2(cur_uv[off + k], prev_uv[off + k]) + W0);
      out2[3 * i + k] =
          clip_fixed_t(Filter2(cur_uv[off + k], next_uv[off + k]) + W1);
    }
  }
}

static WEBP_INLINE uint8_t ConvertRGBToY(int r, int g, int b) {
  const int luma = 16839 * r + 33059 * g + 6420 * b + TROUNDER;
  return clip_8b(16 + (luma >> (YUV_FIX + TFIX)));
}

static WEBP_INLINE uint8_t ConvertRGBToU(int r, int g, int b) {
  const int u = -9719 * r - 19081 * g + 28800 * b + TROUNDER;
  return clip_8b(128 + (u >> (YUV_FIX + TFIX)));
}

static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) {
  const int v = +28800 * r - 24116 * g - 4684 * b + TROUNDER;
  return clip_8b(128 + (v >> (YUV_FIX + TFIX)));
}

static int ConvertWRGBToYUV(const fixed_y_t* const best_y,
                            const fixed_t* const best_uv,
                            WebPPicture* const picture) {
  int i, j;
  const int w = (picture->width + 1) & ~1;
  const int h = (picture->height + 1) & ~1;
  const int uv_w = w >> 1;
  const int uv_h = h >> 1;
  for (j = 0; j < picture->height; ++j) {
    for (i = 0; i < picture->width; ++i) {
      const int off = 3 * ((i >> 1) + (j >> 1) * uv_w);
      const int off2 = i + j * picture->y_stride;
      const int W = FixedYToW(best_y[i + j * w]);
      const int r = best_uv[off + 0] + W;
      const int g = best_uv[off + 1] + W;
      const int b = best_uv[off + 2] + W;
      picture->y[off2] = ConvertRGBToY(r, g, b);
    }
  }
  for (j = 0; j < uv_h; ++j) {
    uint8_t* const dst_u = picture->u + j * picture->uv_stride;
    uint8_t* const dst_v = picture->v + j * picture->uv_stride;
    for (i = 0; i < uv_w; ++i) {
      const int off = 3 * (i + j * uv_w);
      const int r = best_uv[off + 0];
      const int g = best_uv[off + 1];
      const int b = best_uv[off + 2];
      dst_u[i] = ConvertRGBToU(r, g, b);
      dst_v[i] = ConvertRGBToV(r, g, b);
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
// Main function

#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((W) * (H), sizeof(T)))

static int PreprocessARGB(const uint8_t* const r_ptr,
                          const uint8_t* const g_ptr,
                          const uint8_t* const b_ptr,
                          int step, int rgb_stride,
                          WebPPicture* const picture) {
  // we expand the right/bottom border if needed
  const int w = (picture->width + 1) & ~1;
  const int h = (picture->height + 1) & ~1;
  const int uv_w = w >> 1;
  const int uv_h = h >> 1;
  int i, j, iter;

  // TODO(skal): allocate one big memory chunk. But for now, it's easier
  // for valgrind debugging to have several chunks.
  fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t);  // scratch
  fixed_y_t* const best_y = SAFE_ALLOC(w, h, fixed_y_t);
  fixed_y_t* const target_y = SAFE_ALLOC(w, h, fixed_y_t);
  fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
  fixed_t* const best_uv = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
  fixed_t* const target_uv = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
  fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
  int ok;

  if (best_y == NULL || best_uv == NULL ||
      target_y == NULL || target_uv == NULL ||
      best_rgb_y == NULL || best_rgb_uv == NULL ||
      tmp_buffer == NULL) {
    ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
    goto End;
  }
  assert(picture->width >= kMinDimensionIterativeConversion);
  assert(picture->height >= kMinDimensionIterativeConversion);

  // Import RGB samples to W/RGB representation.
  for (j = 0; j < picture->height; j += 2) {
    const int is_last_row = (j == picture->height - 1);
    fixed_y_t* const src1 = tmp_buffer;
    fixed_y_t* const src2 = tmp_buffer + 3 * w;
    const int off1 = j * rgb_stride;
    const int off2 = off1 + rgb_stride;
    const int uv_off = (j >> 1) * 3 * uv_w;
    fixed_y_t* const dst_y = best_y + j * w;

    // prepare two rows of input
    ImportOneRow(r_ptr + off1, g_ptr + off1, b_ptr + off1,
                 step, picture->width, src1);
    if (!is_last_row) {
      ImportOneRow(r_ptr + off2, g_ptr + off2, b_ptr + off2,
                   step, picture->width, src2);
    } else {
      memcpy(src2, src1, 3 * w * sizeof(*src2));
    }
    UpdateW(src1, target_y + (j + 0) * w, w);
    UpdateW(src2, target_y + (j + 1) * w, w);
    UpdateChroma(src1, src2, target_uv + uv_off, dst_y, uv_w);
    memcpy(best_uv + uv_off, target_uv + uv_off, 3 * uv_w * sizeof(*best_uv));
    memcpy(dst_y + w, dst_y, w * sizeof(*dst_y));
  }

  // Iterate and resolve clipping conflicts.
  for (iter = 0; iter < kNumIterations; ++iter) {
    int k;
    const fixed_t* cur_uv = best_uv;
    const fixed_t* prev_uv = best_uv;
    for (j = 0; j < h; j += 2) {
      fixed_y_t* const src1 = tmp_buffer;
      fixed_y_t* const src2 = tmp_buffer + 3 * w;

      {
        const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
        InterpolateTwoRows(best_y + j * w, prev_uv, cur_uv, next_uv,
                           w, src1, src2);
        prev_uv = cur_uv;
        cur_uv = next_uv;
      }

      UpdateW(src1, best_rgb_y + 0 * w, w);
      UpdateW(src2, best_rgb_y + 1 * w, w);
      UpdateChroma(src1, src2, best_rgb_uv, NULL, uv_w);

      // update two rows of Y and one row of RGB
      for (i = 0; i < 2 * w; ++i) {
        const int off = i + j * w;
        const int diff_y = target_y[off] - best_rgb_y[i];
        const int new_y = (int)best_y[off] + diff_y;
        best_y[off] = clip_y(new_y);
      }
      for (i = 0; i < uv_w; ++i) {
        const int off = 3 * (i + (j >> 1) * uv_w);
        int W;
        for (k = 0; k <= 2; ++k) {
          const int diff_uv = (int)target_uv[off + k] - best_rgb_uv[3 * i + k];
          best_uv[off + k] += diff_uv;
        }
        W = RGBToGray(best_uv[off + 0], best_uv[off + 1], best_uv[off + 2]);
        for (k = 0; k <= 2; ++k) {
          best_uv[off + k] -= W;
        }
      }
    }
    // TODO(skal): add early-termination criterion
  }

  // final reconstruction
  ok = ConvertWRGBToYUV(best_y, best_uv, picture);

 End:
  WebPSafeFree(best_y);
  WebPSafeFree(best_uv);
  WebPSafeFree(target_y);
  WebPSafeFree(target_uv);
  WebPSafeFree(best_rgb_y);
  WebPSafeFree(best_rgb_uv);
  WebPSafeFree(tmp_buffer);
  return ok;
}
#undef SAFE_ALLOC

//------------------------------------------------------------------------------
// "Fast" regular RGB->YUV

#define SUM4(ptr, step) LinearToGamma( \
    GammaToLinear((ptr)[0]) + \
    GammaToLinear((ptr)[(step)]) + \
    GammaToLinear((ptr)[rgb_stride]) + \
    GammaToLinear((ptr)[rgb_stride + (step)]), 0)

#define SUM2(ptr) \
    LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)

#define SUM2ALPHA(ptr) ((ptr)[0] + (ptr)[rgb_stride])
#define SUM4ALPHA(ptr) (SUM2ALPHA(ptr) + SUM2ALPHA((ptr) + 4))
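// Note: SUM4() yields the gamma-aware average of a 2x2 block (the division by
// 4 is folded into the extra index bits used by LinearToGamma()), and SUM2()
// handles the single remaining column of odd-width pictures, shifting its
// two-sample sum onto the same scale. SUM2ALPHA()/SUM4ALPHA() are the plain
// alpha sums used to pick between the opaque and the alpha-weighted paths
// below. All four macros read 'rgb_stride' from the enclosing function.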
#if defined(USE_INVERSE_ALPHA_TABLE)

static const int kAlphaFix = 19;
// Following table is (1 << kAlphaFix) / a. The (v * kInvAlpha[a]) >> kAlphaFix
// formula is then equal to v / a in most (99.6%) cases. Note that this table
// and constant are adjusted very tightly to fit 32b arithmetic.
// In particular, they use the fact that the operands for 'v / a' are actually
// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3
// with ai in [0..255] and pi in [0..1<<kGammaFix). The constraint to avoid
// overflow is: kGammaFix + kAlphaFix <= 31.
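// Illustrative example: kInvAlpha[3] == 174762 == (1 << 19) / 3 rounded down,
// so for v == 1000, (v * kInvAlpha[3]) >> kAlphaFix == 333, i.e. 1000 / 3.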
static const uint32_t kInvAlpha[4 * 0xff + 1] = {
  0,  /* alpha = 0 */
  524288, 262144, 174762, 131072, 104857, 87381, 74898, 65536,
  58254, 52428, 47662, 43690, 40329, 37449, 34952, 32768,
  30840, 29127, 27594, 26214, 24966, 23831, 22795, 21845,
  20971, 20164, 19418, 18724, 18078, 17476, 16912, 16384,
  15887, 15420, 14979, 14563, 14169, 13797, 13443, 13107,
  12787, 12483, 12192, 11915, 11650, 11397, 11155, 10922,
  10699, 10485, 10280, 10082, 9892, 9709, 9532, 9362,
  9198, 9039, 8886, 8738, 8594, 8456, 8322, 8192,
  8065, 7943, 7825, 7710, 7598, 7489, 7384, 7281,
  7182, 7084, 6990, 6898, 6808, 6721, 6636, 6553,
  6472, 6393, 6316, 6241, 6168, 6096, 6026, 5957,
  5890, 5825, 5761, 5698, 5637, 5577, 5518, 5461,
  5405, 5349, 5295, 5242, 5190, 5140, 5090, 5041,
  4993, 4946, 4899, 4854, 4809, 4766, 4723, 4681,
  4639, 4599, 4559, 4519, 4481, 4443, 4405, 4369,
  4332, 4297, 4262, 4228, 4194, 4161, 4128, 4096,
  4064, 4032, 4002, 3971, 3942, 3912, 3883, 3855,
  3826, 3799, 3771, 3744, 3718, 3692, 3666, 3640,
  3615, 3591, 3566, 3542, 3518, 3495, 3472, 3449,
  3426, 3404, 3382, 3360, 3339, 3318, 3297, 3276,
  3256, 3236, 3216, 3196, 3177, 3158, 3139, 3120,
  3102, 3084, 3066, 3048, 3030, 3013, 2995, 2978,
  2962, 2945, 2928, 2912, 2896, 2880, 2864, 2849,
  2833, 2818, 2803, 2788, 2774, 2759, 2744, 2730,
  2716, 2702, 2688, 2674, 2661, 2647, 2634, 2621,
  2608, 2595, 2582, 2570, 2557, 2545, 2532, 2520,
  2508, 2496, 2484, 2473, 2461, 2449, 2438, 2427,
  2416, 2404, 2394, 2383, 2372, 2361, 2351, 2340,
  2330, 2319, 2309, 2299, 2289, 2279, 2269, 2259,
  2250, 2240, 2231, 2221, 2212, 2202, 2193, 2184,
  2175, 2166, 2157, 2148, 2139, 2131, 2122, 2114,
  2105, 2097, 2088, 2080, 2072, 2064, 2056, 2048,
  2040, 2032, 2024, 2016, 2008, 2001, 1993, 1985,
  1978, 1971, 1963, 1956, 1949, 1941, 1934, 1927,
  1920, 1913, 1906, 1899, 1892, 1885, 1879, 1872,
  1865, 1859, 1852, 1846, 1839, 1833, 1826, 1820,
  1814, 1807, 1801, 1795, 1789, 1783, 1777, 1771,
  1765, 1759, 1753, 1747, 1741, 1736, 1730, 1724,
  1718, 1713, 1707, 1702, 1696, 1691, 1685, 1680,
  1675, 1669, 1664, 1659, 1653, 1648, 1643, 1638,
  1633, 1628, 1623, 1618, 1613, 1608, 1603, 1598,
  1593, 1588, 1583, 1579, 1574, 1569, 1565, 1560,
  1555, 1551, 1546, 1542, 1537, 1533, 1528, 1524,
  1519, 1515, 1510, 1506, 1502, 1497, 1493, 1489,
  1485, 1481, 1476, 1472, 1468, 1464, 1460, 1456,
  1452, 1448, 1444, 1440, 1436, 1432, 1428, 1424,
  1420, 1416, 1413, 1409, 1405, 1401, 1398, 1394,
  1390, 1387, 1383, 1379, 1376, 1372, 1368, 1365,
  1361, 1358, 1354, 1351, 1347, 1344, 1340, 1337,
  1334, 1330, 1327, 1323, 1320, 1317, 1314, 1310,
  1307, 1304, 1300, 1297, 1294, 1291, 1288, 1285,
  1281, 1278, 1275, 1272, 1269, 1266, 1263, 1260,
  1257, 1254, 1251, 1248, 1245, 1242, 1239, 1236,
  1233, 1230, 1227, 1224, 1222, 1219, 1216, 1213,
  1210, 1208, 1205, 1202, 1199, 1197, 1194, 1191,
  1188, 1186, 1183, 1180, 1178, 1175, 1172, 1170,
  1167, 1165, 1162, 1159, 1157, 1154, 1152, 1149,
  1147, 1144, 1142, 1139, 1137, 1134, 1132, 1129,
  1127, 1125, 1122, 1120, 1117, 1115, 1113, 1110,
  1108, 1106, 1103, 1101, 1099, 1096, 1094, 1092,
  1089, 1087, 1085, 1083, 1081, 1078, 1076, 1074,
  1072, 1069, 1067, 1065, 1063, 1061, 1059, 1057,
  1054, 1052, 1050, 1048, 1046, 1044, 1042, 1040,
  1038, 1036, 1034, 1032, 1030, 1028, 1026, 1024,
  1022, 1020, 1018, 1016, 1014, 1012, 1010, 1008,
  1006, 1004, 1002, 1000, 998, 996, 994, 992,
  991, 989, 987, 985, 983, 981, 979, 978,
  976, 974, 972, 970, 969, 967, 965, 963,
  961, 960, 958, 956, 954, 953, 951, 949,
  948, 946, 944, 942, 941, 939, 937, 936,
  934, 932, 931, 929, 927, 926, 924, 923,
  921, 919, 918, 916, 914, 913, 911, 910,
  908, 907, 905, 903, 902, 900, 899, 897,
  896, 894, 893, 891, 890, 888, 887, 885,
  884, 882, 881, 879, 878, 876, 875, 873,
  872, 870, 869, 868, 866, 865, 863, 862,
  860, 859, 858, 856, 855, 853, 852, 851,
  849, 848, 846, 845, 844, 842, 841, 840,
  838, 837, 836, 834, 833, 832, 830, 829,
  828, 826, 825, 824, 823, 821, 820, 819,
  817, 816, 815, 814, 812, 811, 810, 809,
  807, 806, 805, 804, 802, 801, 800, 799,
  798, 796, 795, 794, 793, 791, 790, 789,
  788, 787, 786, 784, 783, 782, 781, 780,
  779, 777, 776, 775, 774, 773, 772, 771,
  769, 768, 767, 766, 765, 764, 763, 762,
  760, 759, 758, 757, 756, 755, 754, 753,
  752, 751, 750, 748, 747, 746, 745, 744,
  743, 742, 741, 740, 739, 738, 737, 736,
  735, 734, 733, 732, 731, 730, 729, 728,
  727, 726, 725, 724, 723, 722, 721, 720,
  719, 718, 717, 716, 715, 714, 713, 712,
  711, 710, 709, 708, 707, 706, 705, 704,
  703, 702, 701, 700, 699, 699, 698, 697,
  696, 695, 694, 693, 692, 691, 690, 689,
  688, 688, 687, 686, 685, 684, 683, 682,
  681, 680, 680, 679, 678, 677, 676, 675,
  674, 673, 673, 672, 671, 670, 669, 668,
  667, 667, 666, 665, 664, 663, 662, 661,
  661, 660, 659, 658, 657, 657, 656, 655,
  654, 653, 652, 652, 651, 650, 649, 648,
  648, 647, 646, 645, 644, 644, 643, 642,
  641, 640, 640, 639, 638, 637, 637, 636,
  635, 634, 633, 633, 632, 631, 630, 630,
  629, 628, 627, 627, 626, 625, 624, 624,
  623, 622, 621, 621, 620, 619, 618, 618,
  617, 616, 616, 615, 614, 613, 613, 612,
  611, 611, 610, 609, 608, 608, 607, 606,
  606, 605, 604, 604, 603, 602, 601, 601,
  600, 599, 599, 598, 597, 597, 596, 595,
  595, 594, 593, 593, 592, 591, 591, 590,
  589, 589, 588, 587, 587, 586, 585, 585,
  584, 583, 583, 582, 581, 581, 580, 579,
  579, 578, 578, 577, 576, 576, 575, 574,
  574, 573, 572, 572, 571, 571, 570, 569,
  569, 568, 568, 567, 566, 566, 565, 564,
  564, 563, 563, 562, 561, 561, 560, 560,
  559, 558, 558, 557, 557, 556, 555, 555,
  554, 554, 553, 553, 552, 551, 551, 550,
  550, 549, 548, 548, 547, 547, 546, 546,
  545, 544, 544, 543, 543, 542, 542, 541,
  541, 540, 539, 539, 538, 538, 537, 537,
  536, 536, 535, 534, 534, 533, 533, 532,
  532, 531, 531, 530, 530, 529, 529, 528,
  527, 527, 526, 526, 525, 525, 524, 524,
  523, 523, 522, 522, 521, 521, 520, 520,
  519, 519, 518, 518, 517, 517, 516, 516,
  515, 515, 514, 514
};

// Note that LinearToGamma() expects the values to be premultiplied by 4,
// so we incorporate this factor 4 inside the DIVIDE_BY_ALPHA macro directly.
#define DIVIDE_BY_ALPHA(sum, a) (((sum) * kInvAlpha[(a)]) >> (kAlphaFix - 2))

#else

#define DIVIDE_BY_ALPHA(sum, a) (4 * (sum) / (a))

#endif  // USE_INVERSE_ALPHA_TABLE

static WEBP_INLINE int LinearToGammaWeighted(const uint8_t* src,
                                             const uint8_t* a_ptr,
                                             uint32_t total_a, int step,
                                             int rgb_stride) {
  const uint32_t sum =
      a_ptr[0] * GammaToLinear(src[0]) +
      a_ptr[step] * GammaToLinear(src[step]) +
      a_ptr[rgb_stride] * GammaToLinear(src[rgb_stride]) +
      a_ptr[rgb_stride + step] * GammaToLinear(src[rgb_stride + step]);
  assert(total_a > 0 && total_a <= 4 * 0xff);
#if defined(USE_INVERSE_ALPHA_TABLE)
  assert((uint64_t)sum * kInvAlpha[total_a] < ((uint64_t)1 << 32));
#endif
  return LinearToGamma(DIVIDE_BY_ALPHA(sum, total_a), 0);
}

static WEBP_INLINE void ConvertRowToY(const uint8_t* const r_ptr,
                                      const uint8_t* const g_ptr,
                                      const uint8_t* const b_ptr,
                                      int step,
                                      uint8_t* const dst_y,
                                      int width,
                                      VP8Random* const rg) {
  int i, j;
  for (i = 0, j = 0; i < width; ++i, j += step) {
    dst_y[i] = RGBToY(r_ptr[j], g_ptr[j], b_ptr[j], rg);
  }
}

static WEBP_INLINE void ConvertRowsToUVWithAlpha(const uint8_t* const r_ptr,
                                                 const uint8_t* const g_ptr,
                                                 const uint8_t* const b_ptr,
                                                 const uint8_t* const a_ptr,
                                                 int rgb_stride,
                                                 uint8_t* const dst_u,
                                                 uint8_t* const dst_v,
                                                 int width,
                                                 VP8Random* const rg) {
  int i, j;
  // we loop over 2x2 blocks and produce one U/V value for each.
  for (i = 0, j = 0; i < (width >> 1); ++i, j += 2 * sizeof(uint32_t)) {
    const uint32_t a = SUM4ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM4(r_ptr + j, 4);
      g = SUM4(g_ptr + j, 4);
      b = SUM4(b_ptr + j, 4);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 4, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 4, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 4, rgb_stride);
    }
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
  if (width & 1) {
    const uint32_t a = 2u * SUM2ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM2(r_ptr + j);
      g = SUM2(g_ptr + j);
      b = SUM2(b_ptr + j);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 0, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 0, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 0, rgb_stride);
    }
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
}

static WEBP_INLINE void ConvertRowsToUV(const uint8_t* const r_ptr,
                                        const uint8_t* const g_ptr,
                                        const uint8_t* const b_ptr,
                                        int step, int rgb_stride,
                                        uint8_t* const dst_u,
                                        uint8_t* const dst_v,
                                        int width,
                                        VP8Random* const rg) {
  int i, j;
  for (i = 0, j = 0; i < (width >> 1); ++i, j += 2 * step) {
    const int r = SUM4(r_ptr + j, step);
    const int g = SUM4(g_ptr + j, step);
    const int b = SUM4(b_ptr + j, step);
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
  if (width & 1) {
    const int r = SUM2(r_ptr + j);
    const int g = SUM2(g_ptr + j);
    const int b = SUM2(b_ptr + j);
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
}
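// Note: both row converters above emit one U/V pair per 2x2 block. The
// ...WithAlpha() variant weights each sample by its alpha through
// LinearToGammaWeighted(), so fully transparent pixels do not skew the
// averaged chroma; the unweighted SUM4/SUM2 path is only taken when the
// block is fully opaque or fully transparent.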
static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
                              const uint8_t* const g_ptr,
                              const uint8_t* const b_ptr,
                              const uint8_t* const a_ptr,
                              int step,         // bytes per pixel
                              int rgb_stride,   // bytes per scanline
                              float dithering,
                              int use_iterative_conversion,
                              WebPPicture* const picture) {
  int y;
  const int width = picture->width;
  const int height = picture->height;
  const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);

  picture->colorspace = has_alpha ? WEBP_YUV420A : WEBP_YUV420;
  picture->use_argb = 0;

  // disable smart conversion if source is too small (overkill).
  if (width < kMinDimensionIterativeConversion ||
      height < kMinDimensionIterativeConversion) {
    use_iterative_conversion = 0;
  }

  if (!WebPPictureAllocYUVA(picture, width, height)) {
    return 0;
  }
  if (has_alpha) {
    WebPInitAlphaProcessing();
    assert(step == 4);
#if defined(USE_INVERSE_ALPHA_TABLE)
    assert(kAlphaFix + kGammaFix <= 31);
#endif
  }

  if (use_iterative_conversion) {
    InitGammaTablesF();
    if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) {
      return 0;
    }
    if (has_alpha) {
      WebPExtractAlpha(a_ptr, rgb_stride, width, height,
                       picture->a, picture->a_stride);
    }
  } else {
    uint8_t* dst_y = picture->y;
    uint8_t* dst_u = picture->u;
    uint8_t* dst_v = picture->v;
    uint8_t* dst_a = picture->a;

    VP8Random base_rg;
    VP8Random* rg = NULL;
    if (dithering > 0.) {
      VP8InitRandom(&base_rg, dithering);
      rg = &base_rg;
    }

    InitGammaTables();

    // Downsample Y/U/V planes, two rows at a time
    for (y = 0; y < (height >> 1); ++y) {
      int rows_have_alpha = has_alpha;
      const int off1 = (2 * y + 0) * rgb_stride;
      const int off2 = (2 * y + 1) * rgb_stride;
      ConvertRowToY(r_ptr + off1, g_ptr + off1, b_ptr + off1, step,
                    dst_y, width, rg);
      ConvertRowToY(r_ptr + off2, g_ptr + off2, b_ptr + off2, step,
                    dst_y + picture->y_stride, width, rg);
      dst_y += 2 * picture->y_stride;
      if (has_alpha) {
        rows_have_alpha &= !WebPExtractAlpha(a_ptr + off1, rgb_stride,
                                             width, 2,
                                             dst_a, picture->a_stride);
        dst_a += 2 * picture->a_stride;
      }
      if (!rows_have_alpha) {
        ConvertRowsToUV(r_ptr + off1, g_ptr + off1, b_ptr + off1,
                        step, rgb_stride, dst_u, dst_v, width, rg);
      } else {
        ConvertRowsToUVWithAlpha(r_ptr + off1, g_ptr + off1, b_ptr + off1,
                                 a_ptr + off1, rgb_stride,
                                 dst_u, dst_v, width, rg);
      }
      dst_u += picture->uv_stride;
      dst_v += picture->uv_stride;
    }
    if (height & 1) {  // extra last row
      const int off = 2 * y * rgb_stride;
      int row_has_alpha = has_alpha;
      ConvertRowToY(r_ptr + off, g_ptr + off, b_ptr + off, step,
                    dst_y, width, rg);
      if (row_has_alpha) {
        row_has_alpha &= !WebPExtractAlpha(a_ptr + off, 0, width, 1, dst_a, 0);
      }
      if (!row_has_alpha) {
        ConvertRowsToUV(r_ptr + off, g_ptr + off, b_ptr + off,
                        step, 0, dst_u, dst_v, width, rg);
      } else {
        ConvertRowsToUVWithAlpha(r_ptr + off, g_ptr + off, b_ptr + off,
                                 a_ptr + off, 0,
                                 dst_u, dst_v, width, rg);
      }
    }
  }
  return 1;
}

#undef SUM4
#undef SUM2
#undef SUM4ALPHA
#undef SUM2ALPHA

//------------------------------------------------------------------------------
// call for ARGB->YUVA conversion

static int PictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace,
                             float dithering, int use_iterative_conversion) {
  if (picture == NULL) return 0;
  if (picture->argb == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  } else if ((colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  } else {
    const uint8_t* const argb = (const uint8_t*)picture->argb;
    const uint8_t* const r = ALPHA_IS_LAST ? argb + 2 : argb + 1;
    const uint8_t* const g = ALPHA_IS_LAST ? argb + 1 : argb + 2;
    const uint8_t* const b = ALPHA_IS_LAST ? argb + 0 : argb + 3;
    const uint8_t* const a = ALPHA_IS_LAST ? argb + 3 : argb + 0;

    picture->colorspace = WEBP_YUV420;
    return ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride,
                              dithering, use_iterative_conversion, picture);
  }
}

int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
                                  float dithering) {
  return PictureARGBToYUVA(picture, colorspace, dithering, 0);
}

int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
  return PictureARGBToYUVA(picture, colorspace, 0.f, 0);
}

#if WEBP_ENCODER_ABI_VERSION > 0x0204
int WebPPictureSmartARGBToYUVA(WebPPicture* picture) {
  return PictureARGBToYUVA(picture, WEBP_YUV420, 0.f, 1);
}
#endif

//------------------------------------------------------------------------------
// call for YUVA -> ARGB conversion

int WebPPictureYUVAToARGB(WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  }
  // Allocate a new argb buffer (discarding the previous one).
  if (!WebPPictureAllocARGB(picture, picture->width, picture->height)) return 0;
  picture->use_argb = 1;

  // Convert
  {
    int y;
    const int width = picture->width;
    const int height = picture->height;
    const int argb_stride = 4 * picture->argb_stride;
    uint8_t* dst = (uint8_t*)picture->argb;
    const uint8_t *cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y;
    WebPUpsampleLinePairFunc upsample = WebPGetLinePairConverter(ALPHA_IS_LAST);

    // First row, with replicated top samples.
    upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    cur_y += picture->y_stride;
    dst += argb_stride;
    // Center rows.
    for (y = 1; y + 1 < height; y += 2) {
      const uint8_t* const top_u = cur_u;
      const uint8_t* const top_v = cur_v;
      cur_u += picture->uv_stride;
      cur_v += picture->uv_stride;
      upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v,
               dst, dst + argb_stride, width);
      cur_y += 2 * picture->y_stride;
      dst += 2 * argb_stride;
    }
    // Last row (if needed), with replicated bottom samples.
    if (height > 1 && !(height & 1)) {
      upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    }
    // Insert alpha values if needed, in replacement for the default 0xff ones.
    if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
      for (y = 0; y < height; ++y) {
        uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
        const uint8_t* const src = picture->a + y * picture->a_stride;
        int x;
        for (x = 0; x < width; ++x) {
          argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
        }
      }
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
// automatic import / conversion

static int Import(WebPPicture* const picture,
                  const uint8_t* const rgb, int rgb_stride,
                  int step, int swap_rb, int import_alpha) {
  int y;
  const uint8_t* const r_ptr = rgb + (swap_rb ? 2 : 0);
  const uint8_t* const g_ptr = rgb + 1;
  const uint8_t* const b_ptr = rgb + (swap_rb ? 0 : 2);
  const uint8_t* const a_ptr = import_alpha ? rgb + 3 : NULL;
  const int width = picture->width;
  const int height = picture->height;

  if (!picture->use_argb) {
    return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
                              0.f /* no dithering */, 0, picture);
  }
  if (!WebPPictureAlloc(picture)) return 0;

  assert(step >= (import_alpha ? 4 : 3));
  for (y = 0; y < height; ++y) {
    uint32_t* const dst = &picture->argb[y * picture->argb_stride];
    int x;
    for (x = 0; x < width; ++x) {
      const int offset = step * x + y * rgb_stride;
      dst[x] = MakeARGB32(import_alpha ? a_ptr[offset] : 0xff,
                          r_ptr[offset], g_ptr[offset], b_ptr[offset]);
    }
  }
  return 1;
}

// Public API

int WebPPictureImportRGB(WebPPicture* picture,
                         const uint8_t* rgb, int rgb_stride) {
  return (picture != NULL) ? Import(picture, rgb, rgb_stride, 3, 0, 0) : 0;
}

int WebPPictureImportBGR(WebPPicture* picture,
                         const uint8_t* rgb, int rgb_stride) {
  return (picture != NULL) ? Import(picture, rgb, rgb_stride, 3, 1, 0) : 0;
}

int WebPPictureImportRGBA(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL) ? Import(picture, rgba, rgba_stride, 4, 0, 1) : 0;
}

int WebPPictureImportBGRA(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL) ? Import(picture, rgba, rgba_stride, 4, 1, 1) : 0;
}

int WebPPictureImportRGBX(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL) ? Import(picture, rgba, rgba_stride, 4, 0, 0) : 0;
}

int WebPPictureImportBGRX(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL) ? Import(picture, rgba, rgba_stride, 4, 1, 0) : 0;
}

//------------------------------------------------------------------------------