// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// main entry for the decoder
//
// Authors: Vikas Arora (vikaas.arora@gmail.com)
//          Jyrki Alakuijala (jyrki@google.com)

#include <stdlib.h>

#include "./alphai.h"
#include "./vp8li.h"
#include "../dsp/dsp.h"
#include "../dsp/lossless.h"
#include "../dsp/yuv.h"
#include "../utils/huffman.h"
#include "../utils/utils.h"

#define NUM_ARGB_CACHE_ROWS 16

static const int kCodeLengthLiterals = 16;
static const int kCodeLengthRepeatCode = 16;
static const int kCodeLengthExtraBits[3] = { 2, 3, 7 };
static const int kCodeLengthRepeatOffsets[3] = { 3, 3, 11 };
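
// Note on the repeat codes above (per the WebP lossless code-length coding,
// which mirrors DEFLATE): symbol 16 repeats the previous non-zero code length
// 3 + ReadBits(2) times (3..6), symbol 17 emits a run of zero lengths of
// length 3 + ReadBits(3) (3..10), and symbol 18 a run of zero lengths of
// length 11 + ReadBits(7) (11..138). These ranges follow directly from
// kCodeLengthExtraBits / kCodeLengthRepeatOffsets.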

// -----------------------------------------------------------------------------
// Five Huffman codes are used at each meta code:
//  1. green + length prefix codes + color cache codes,
//  2. alpha,
//  3. red,
//  4. blue, and,
//  5. distance prefix codes.
typedef enum {
  GREEN = 0,
  RED   = 1,
  BLUE  = 2,
  ALPHA = 3,
  DIST  = 4
} HuffIndex;

static const uint16_t kAlphabetSize[HUFFMAN_CODES_PER_META_CODE] = {
  NUM_LITERAL_CODES + NUM_LENGTH_CODES,
  NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
  NUM_DISTANCE_CODES
};

#define NUM_CODE_LENGTH_CODES 19
static const uint8_t kCodeLengthCodeOrder[NUM_CODE_LENGTH_CODES] = {
  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

#define CODE_TO_PLANE_CODES 120
static const uint8_t kCodeToPlane[CODE_TO_PLANE_CODES] = {
  0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a,
  0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a,
  0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b,
  0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03,
  0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c,
  0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e,
  0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b,
  0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f,
  0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b,
  0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41,
  0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f,
  0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70
};

static int DecodeImageStream(int xsize, int ysize,
                             int is_level0,
                             VP8LDecoder* const dec,
                             uint32_t** const decoded_data);

//------------------------------------------------------------------------------

int VP8LCheckSignature(const uint8_t* const data, size_t size) {
  return (size >= VP8L_FRAME_HEADER_SIZE &&
          data[0] == VP8L_MAGIC_BYTE &&
          (data[4] >> 5) == 0);  // version
}

static int ReadImageInfo(VP8LBitReader* const br,
                         int* const width, int* const height,
                         int* const has_alpha) {
  if (VP8LReadBits(br, 8) != VP8L_MAGIC_BYTE) return 0;
  *width = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
  *height = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
  *has_alpha = VP8LReadBits(br, 1);
  if (VP8LReadBits(br, VP8L_VERSION_BITS) != 0) return 0;
  return 1;
}
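
// Layout of the 5-byte VP8L frame header read above (bits packed LSB first):
// byte 0 is the 0x2f signature, then 14 bits of (width - 1), 14 bits of
// (height - 1), 1 alpha-hint bit and 3 version bits, which must all be zero.
// The version bits land in the top 3 bits of byte 4, which is exactly what
// VP8LCheckSignature() tests with (data[4] >> 5).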

int VP8LGetInfo(const uint8_t* data, size_t data_size,
                int* const width, int* const height, int* const has_alpha) {
  if (data == NULL || data_size < VP8L_FRAME_HEADER_SIZE) {
    return 0;         // not enough data
  } else if (!VP8LCheckSignature(data, data_size)) {
    return 0;         // bad signature
  } else {
    int w, h, a;
    VP8LBitReader br;
    VP8LInitBitReader(&br, data, data_size);
    if (!ReadImageInfo(&br, &w, &h, &a)) {
      return 0;
    }
    if (width != NULL) *width = w;
    if (height != NULL) *height = h;
    if (has_alpha != NULL) *has_alpha = a;
    return 1;
  }
}

//------------------------------------------------------------------------------

static WEBP_INLINE int GetCopyDistance(int distance_symbol,
                                       VP8LBitReader* const br) {
  int extra_bits, offset;
  if (distance_symbol < 4) {
    return distance_symbol + 1;
  }
  extra_bits = (distance_symbol - 2) >> 1;
  offset = (2 + (distance_symbol & 1)) << extra_bits;
  return offset + VP8LReadBits(br, extra_bits) + 1;
}
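
// Worked example of the prefix coding above: symbols 0..3 map directly to
// distances 1..4. For distance_symbol == 6, extra_bits = (6 - 2) >> 1 = 2 and
// offset = (2 + 0) << 2 = 8, so the decoded value is 8 + ReadBits(2) + 1,
// i.e. one of 9..12. Each symbol >= 4 thus covers a power-of-two sized range.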

static WEBP_INLINE int GetCopyLength(int length_symbol,
                                     VP8LBitReader* const br) {
  // Length and distance prefixes are encoded the same way.
  return GetCopyDistance(length_symbol, br);
}

static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) {
  if (plane_code > CODE_TO_PLANE_CODES) {
    return plane_code - CODE_TO_PLANE_CODES;
  } else {
    const int dist_code = kCodeToPlane[plane_code - 1];
    const int yoffset = dist_code >> 4;
    const int xoffset = 8 - (dist_code & 0xf);
    const int dist = yoffset * xsize + xoffset;
    return (dist >= 1) ? dist : 1;  // dist<1 can happen if xsize is very small
  }
}
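
// Illustration: plane codes 1..120 address close 2-D neighbors through
// kCodeToPlane, where the high nibble is a row offset and the low nibble is
// (8 - column offset). E.g. plane_code 1 -> 0x18 -> (dy=1, dx=0), the pixel
// directly above (distance xsize); plane_code 2 -> 0x07 -> (dy=0, dx=1), the
// pixel to the left (distance 1). Codes above 120 are plain linear distances
// shifted by CODE_TO_PLANE_CODES.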

//------------------------------------------------------------------------------
// Decodes the next Huffman code from bit-stream.
// FillBitWindow(br) needs to be called at minimum every second call
// to ReadSymbol, in order to pre-fetch enough bits.
static WEBP_INLINE int ReadSymbol(const HuffmanTree* tree,
                                  VP8LBitReader* const br) {
  const HuffmanTreeNode* node = tree->root_;
  uint32_t bits = VP8LPrefetchBits(br);
  int bitpos = br->bit_pos_;
  // Check if we find the bit combination from the Huffman lookup table.
  const int lut_ix = bits & (HUFF_LUT - 1);
  const int lut_bits = tree->lut_bits_[lut_ix];
  if (lut_bits <= HUFF_LUT_BITS) {
    VP8LSetBitPos(br, bitpos + lut_bits);
    return tree->lut_symbol_[lut_ix];
  }
  node += tree->lut_jump_[lut_ix];
  bitpos += HUFF_LUT_BITS;
  bits >>= HUFF_LUT_BITS;

  // Decode the value from a binary tree.
  assert(node != NULL);
  do {
    node = HuffmanTreeNextNode(node, bits & 1);
    bits >>= 1;
    ++bitpos;
  } while (HuffmanTreeNodeIsNotLeaf(node));
  VP8LSetBitPos(br, bitpos);
  return node->symbol_;
}
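
// The fast path above resolves any code of length <= HUFF_LUT_BITS with a
// single table lookup; only longer codes fall back to the bit-by-bit tree
// walk, starting at the node indexed by lut_jump_.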

static int ReadHuffmanCodeLengths(
    VP8LDecoder* const dec, const int* const code_length_code_lengths,
    int num_symbols, int* const code_lengths) {
  int ok = 0;
  VP8LBitReader* const br = &dec->br_;
  int symbol;
  int max_symbol;
  int prev_code_len = DEFAULT_CODE_LENGTH;
  HuffmanTree tree;
  int huff_codes[NUM_CODE_LENGTH_CODES] = { 0 };

  if (!VP8LHuffmanTreeBuildImplicit(&tree, code_length_code_lengths,
                                    huff_codes, NUM_CODE_LENGTH_CODES)) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    return 0;
  }

  if (VP8LReadBits(br, 1)) {    // use length
    const int length_nbits = 2 + 2 * VP8LReadBits(br, 3);
    max_symbol = 2 + VP8LReadBits(br, length_nbits);
    if (max_symbol > num_symbols) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto End;
    }
  } else {
    max_symbol = num_symbols;
  }

  symbol = 0;
  while (symbol < num_symbols) {
    int code_len;
    if (max_symbol-- == 0) break;
    VP8LFillBitWindow(br);
    code_len = ReadSymbol(&tree, br);
    if (code_len < kCodeLengthLiterals) {
      code_lengths[symbol++] = code_len;
      if (code_len != 0) prev_code_len = code_len;
    } else {
      const int use_prev = (code_len == kCodeLengthRepeatCode);
      const int slot = code_len - kCodeLengthLiterals;
      const int extra_bits = kCodeLengthExtraBits[slot];
      const int repeat_offset = kCodeLengthRepeatOffsets[slot];
      int repeat = VP8LReadBits(br, extra_bits) + repeat_offset;
      if (symbol + repeat > num_symbols) {
        dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
        goto End;
      } else {
        const int length = use_prev ? prev_code_len : 0;
        while (repeat-- > 0) code_lengths[symbol++] = length;
      }
    }
  }
  ok = 1;

 End:
  VP8LHuffmanTreeFree(&tree);
  return ok;
}

// 'code_lengths' is a pre-allocated temporary buffer, used for creating the
// Huffman tree.
static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
                           int* const code_lengths, int* const huff_codes,
                           HuffmanTree* const tree) {
  int ok = 0;
  VP8LBitReader* const br = &dec->br_;
  const int simple_code = VP8LReadBits(br, 1);

  if (simple_code) {  // Read symbols, codes & code lengths directly.
    int symbols[2];
    int codes[2];
    const int num_symbols = VP8LReadBits(br, 1) + 1;
    const int first_symbol_len_code = VP8LReadBits(br, 1);
    // The first code is either a 1-bit or an 8-bit code.
    symbols[0] = VP8LReadBits(br, (first_symbol_len_code == 0) ? 1 : 8);
    codes[0] = 0;
    code_lengths[0] = num_symbols - 1;
    // The second code (if present) is always 8 bits long.
    if (num_symbols == 2) {
      symbols[1] = VP8LReadBits(br, 8);
      codes[1] = 1;
      code_lengths[1] = num_symbols - 1;
    }
    ok = VP8LHuffmanTreeBuildExplicit(tree, code_lengths, codes, symbols,
                                      alphabet_size, num_symbols);
  } else {  // Decode Huffman-coded code lengths.
    int i;
    int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
    const int num_codes = VP8LReadBits(br, 4) + 4;
    if (num_codes > NUM_CODE_LENGTH_CODES) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      return 0;
    }

    memset(code_lengths, 0, alphabet_size * sizeof(*code_lengths));

    for (i = 0; i < num_codes; ++i) {
      code_length_code_lengths[kCodeLengthCodeOrder[i]] = VP8LReadBits(br, 3);
    }
    ok = ReadHuffmanCodeLengths(dec, code_length_code_lengths, alphabet_size,
                                code_lengths);
    ok = ok && VP8LHuffmanTreeBuildImplicit(tree, code_lengths, huff_codes,
                                            alphabet_size);
  }
  ok = ok && !br->error_;
  if (!ok) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    return 0;
  }
  return 1;
}
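
// Note on the "simple code" path: with num_symbols == 1 the single symbol is
// assigned code length 0, i.e. it is decoded without consuming any bits.
// Is8bOptimizable() below relies on exactly this property for the
// red/blue/alpha trees of paletted alpha images.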

static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
                            int color_cache_bits, int allow_recursion) {
  int i, j;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  uint32_t* huffman_image = NULL;
  HTreeGroup* htree_groups = NULL;
  int num_htree_groups = 1;
  int max_alphabet_size = 0;
  int* code_lengths = NULL;
  int* huff_codes = NULL;

  if (allow_recursion && VP8LReadBits(br, 1)) {
    // use meta Huffman codes.
    const int huffman_precision = VP8LReadBits(br, 3) + 2;
    const int huffman_xsize = VP8LSubSampleSize(xsize, huffman_precision);
    const int huffman_ysize = VP8LSubSampleSize(ysize, huffman_precision);
    const int huffman_pixs = huffman_xsize * huffman_ysize;
    if (!DecodeImageStream(huffman_xsize, huffman_ysize, 0, dec,
                           &huffman_image)) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto Error;
    }
    hdr->huffman_subsample_bits_ = huffman_precision;
    for (i = 0; i < huffman_pixs; ++i) {
      // The huffman data is stored in red and green bytes.
      const int group = (huffman_image[i] >> 8) & 0xffff;
      huffman_image[i] = group;
      if (group >= num_htree_groups) {
        num_htree_groups = group + 1;
      }
    }
  }

  if (br->error_) goto Error;

  // Find the maximum alphabet size for the htree groups.
  for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
    int alphabet_size = kAlphabetSize[j];
    if (j == 0 && color_cache_bits > 0) {
      alphabet_size += 1 << color_cache_bits;
    }
    if (max_alphabet_size < alphabet_size) {
      max_alphabet_size = alphabet_size;
    }
  }

  htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
  code_lengths =
      (int*)WebPSafeCalloc((uint64_t)max_alphabet_size, sizeof(*code_lengths));
  huff_codes =
      (int*)WebPSafeMalloc((uint64_t)max_alphabet_size, sizeof(*huff_codes));

  if (htree_groups == NULL || code_lengths == NULL || huff_codes == NULL) {
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    goto Error;
  }

  for (i = 0; i < num_htree_groups; ++i) {
    HuffmanTree* const htrees = htree_groups[i].htrees_;
    for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
      int alphabet_size = kAlphabetSize[j];
      HuffmanTree* const htree = htrees + j;
      if (j == 0 && color_cache_bits > 0) {
        alphabet_size += 1 << color_cache_bits;
      }
      if (!ReadHuffmanCode(alphabet_size, dec, code_lengths, huff_codes,
                           htree)) {
        goto Error;
      }
    }
  }
  WebPSafeFree(huff_codes);
  WebPSafeFree(code_lengths);

  // All OK. Finalize pointers and return.
  hdr->huffman_image_ = huffman_image;
  hdr->num_htree_groups_ = num_htree_groups;
  hdr->htree_groups_ = htree_groups;
  return 1;

 Error:
  WebPSafeFree(huff_codes);
  WebPSafeFree(code_lengths);
  WebPSafeFree(huffman_image);
  VP8LHtreeGroupsFree(htree_groups, num_htree_groups);
  return 0;
}
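
// Meta-Huffman illustration: each tile of (1 << huffman_precision) pixels on
// a side selects one of num_htree_groups groups of five trees; the group
// index is carried in the red+green channels of the sub-sampled huffman
// image. E.g. with huffman_precision == 3, a 100-pixel-wide image has
// VP8LSubSampleSize(100, 3) == (100 + 7) >> 3 == 13 tiles per row.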

//------------------------------------------------------------------------------
// Scaling.

static int AllocateAndInitRescaler(VP8LDecoder* const dec, VP8Io* const io) {
  const int num_channels = 4;
  const int in_width = io->mb_w;
  const int out_width = io->scaled_width;
  const int in_height = io->mb_h;
  const int out_height = io->scaled_height;
  const uint64_t work_size = 2 * num_channels * (uint64_t)out_width;
  int32_t* work;        // Rescaler work area.
  const uint64_t scaled_data_size = num_channels * (uint64_t)out_width;
  uint32_t* scaled_data;  // Temporary storage for scaled BGRA data.
  const uint64_t memory_size = sizeof(*dec->rescaler) +
                               work_size * sizeof(*work) +
                               scaled_data_size * sizeof(*scaled_data);
  uint8_t* memory = (uint8_t*)WebPSafeCalloc(memory_size, sizeof(*memory));
  if (memory == NULL) {
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    return 0;
  }
  assert(dec->rescaler_memory == NULL);
  dec->rescaler_memory = memory;

  dec->rescaler = (WebPRescaler*)memory;
  memory += sizeof(*dec->rescaler);
  work = (int32_t*)memory;
  memory += work_size * sizeof(*work);
  scaled_data = (uint32_t*)memory;

  WebPRescalerInit(dec->rescaler, in_width, in_height, (uint8_t*)scaled_data,
                   out_width, out_height, 0, num_channels,
                   in_width, out_width, in_height, out_height, work);
  return 1;
}

//------------------------------------------------------------------------------
// Export to ARGB

// We have a special "export" function since we need to convert from BGRA.
static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace,
                  int rgba_stride, uint8_t* const rgba) {
  uint32_t* const src = (uint32_t*)rescaler->dst;
  const int dst_width = rescaler->dst_width;
  int num_lines_out = 0;
  while (WebPRescalerHasPendingOutput(rescaler)) {
    uint8_t* const dst = rgba + num_lines_out * rgba_stride;
    WebPRescalerExportRow(rescaler, 0);
    WebPMultARGBRow(src, dst_width, 1);
    VP8LConvertFromBGRA(src, dst_width, colorspace, dst);
    ++num_lines_out;
  }
  return num_lines_out;
}

// Emit scaled rows.
static int EmitRescaledRowsRGBA(const VP8LDecoder* const dec,
                                uint8_t* in, int in_stride, int mb_h,
                                uint8_t* const out, int out_stride) {
  const WEBP_CSP_MODE colorspace = dec->output_->colorspace;
  int num_lines_in = 0;
  int num_lines_out = 0;
  while (num_lines_in < mb_h) {
    uint8_t* const row_in = in + num_lines_in * in_stride;
    uint8_t* const row_out = out + num_lines_out * out_stride;
    const int lines_left = mb_h - num_lines_in;
    const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left);
    assert(needed_lines > 0 && needed_lines <= lines_left);
    WebPMultARGBRows(row_in, in_stride,
                     dec->rescaler->src_width, needed_lines, 0);
    WebPRescalerImport(dec->rescaler, lines_left, row_in, in_stride);
    num_lines_in += needed_lines;
    num_lines_out += Export(dec->rescaler, colorspace, out_stride, row_out);
  }
  return num_lines_out;
}

// Emit rows without any scaling.
static int EmitRows(WEBP_CSP_MODE colorspace,
                    const uint8_t* row_in, int in_stride,
                    int mb_w, int mb_h,
                    uint8_t* const out, int out_stride) {
  int lines = mb_h;
  uint8_t* row_out = out;
  while (lines-- > 0) {
    VP8LConvertFromBGRA((const uint32_t*)row_in, mb_w, colorspace, row_out);
    row_in += in_stride;
    row_out += out_stride;
  }
  return mb_h;  // Num rows out == num rows in.
}

//------------------------------------------------------------------------------
// Export to YUVA

// TODO(skal): should be in yuv.c
static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
                          const WebPDecBuffer* const output) {
  const WebPYUVABuffer* const buf = &output->u.YUVA;
  // first, the luma plane
  {
    int i;
    uint8_t* const y = buf->y + y_pos * buf->y_stride;
    for (i = 0; i < width; ++i) {
      const uint32_t p = src[i];
      y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff,
                       YUV_HALF);
    }
  }

  // then U/V planes
  {
    uint8_t* const u = buf->u + (y_pos >> 1) * buf->u_stride;
    uint8_t* const v = buf->v + (y_pos >> 1) * buf->v_stride;
    const int uv_width = width >> 1;
    int i;
    for (i = 0; i < uv_width; ++i) {
      const uint32_t v0 = src[2 * i + 0];
      const uint32_t v1 = src[2 * i + 1];
      // VP8RGBToU/V expects four accumulated pixels. Hence we need to
      // scale the r/g/b values by a factor 2. We just shift v0/v1 one bit less.
      const int r = ((v0 >> 15) & 0x1fe) + ((v1 >> 15) & 0x1fe);
      const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe);
      const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe);
      if (!(y_pos & 1)) {  // even lines: store values
        u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
        v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
      } else {             // odd lines: average with previous values
        const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
        const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
        // Approximated average-of-four. But it's an acceptable diff.
        u[i] = (u[i] + tmp_u + 1) >> 1;
        v[i] = (v[i] + tmp_v + 1) >> 1;
      }
    }
    if (width & 1) {  // last pixel
      const uint32_t v0 = src[2 * i + 0];
      const int r = (v0 >> 14) & 0x3fc;
      const int g = (v0 >> 6) & 0x3fc;
      const int b = (v0 << 2) & 0x3fc;
      if (!(y_pos & 1)) {  // even lines
        u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
        v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
      } else {             // odd lines (note: we could just skip this)
        const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
        const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
        u[i] = (u[i] + tmp_u + 1) >> 1;
        v[i] = (v[i] + tmp_v + 1) >> 1;
      }
    }
  }
  // Lastly, store alpha if needed.
  if (buf->a != NULL) {
    int i;
    uint8_t* const a = buf->a + y_pos * buf->a_stride;
    for (i = 0; i < width; ++i) a[i] = (src[i] >> 24);
  }
}

static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) {
  WebPRescaler* const rescaler = dec->rescaler;
  uint32_t* const src = (uint32_t*)rescaler->dst;
  const int dst_width = rescaler->dst_width;
  int num_lines_out = 0;
  while (WebPRescalerHasPendingOutput(rescaler)) {
    WebPRescalerExportRow(rescaler, 0);
    WebPMultARGBRow(src, dst_width, 1);
    ConvertToYUVA(src, dst_width, y_pos, dec->output_);
    ++y_pos;
    ++num_lines_out;
  }
  return num_lines_out;
}

static int EmitRescaledRowsYUVA(const VP8LDecoder* const dec,
                                uint8_t* in, int in_stride, int mb_h) {
  int num_lines_in = 0;
  int y_pos = dec->last_out_row_;
  while (num_lines_in < mb_h) {
    const int lines_left = mb_h - num_lines_in;
    const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left);
    WebPMultARGBRows(in, in_stride, dec->rescaler->src_width, needed_lines, 0);
    WebPRescalerImport(dec->rescaler, lines_left, in, in_stride);
    num_lines_in += needed_lines;
    in += needed_lines * in_stride;
    y_pos += ExportYUVA(dec, y_pos);
  }
  return y_pos;
}

static int EmitRowsYUVA(const VP8LDecoder* const dec,
                        const uint8_t* in, int in_stride,
                        int mb_w, int num_rows) {
  int y_pos = dec->last_out_row_;
  while (num_rows-- > 0) {
    ConvertToYUVA((const uint32_t*)in, mb_w, y_pos, dec->output_);
    in += in_stride;
    ++y_pos;
  }
  return y_pos;
}

//------------------------------------------------------------------------------
// Cropping.

// Sets io->mb_y, io->mb_h & io->mb_w according to start row, end row and
// crop options. Also updates the input data pointer, so that it points to the
// start of the cropped window. Note that pixels are in ARGB format even if
// 'in_data' is uint8_t*.
// Returns true if the crop window is not empty.
static int SetCropWindow(VP8Io* const io, int y_start, int y_end,
                         uint8_t** const in_data, int pixel_stride) {
  assert(y_start < y_end);
  assert(io->crop_left < io->crop_right);
  if (y_end > io->crop_bottom) {
    y_end = io->crop_bottom;  // make sure we don't overflow on last row.
  }
  if (y_start < io->crop_top) {
    const int delta = io->crop_top - y_start;
    y_start = io->crop_top;
    *in_data += delta * pixel_stride;
  }
  if (y_start >= y_end) return 0;  // Crop window is empty.

  *in_data += io->crop_left * sizeof(uint32_t);

  io->mb_y = y_start - io->crop_top;
  io->mb_w = io->crop_right - io->crop_left;
  io->mb_h = y_end - y_start;
  return 1;  // Non-empty crop window.
}

//------------------------------------------------------------------------------

static WEBP_INLINE int GetMetaIndex(
    const uint32_t* const image, int xsize, int bits, int x, int y) {
  if (bits == 0) return 0;
  return image[xsize * (y >> bits) + (x >> bits)];
}
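
// Example: with bits == 3 the pixel at (x=21, y=10) falls in tile
// (21 >> 3, 10 >> 3) == (2, 1), so its meta index is image[xsize * 1 + 2],
// where 'xsize' is the width of the sub-sampled huffman image.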

static WEBP_INLINE HTreeGroup* GetHtreeGroupForPos(VP8LMetadata* const hdr,
                                                   int x, int y) {
  const int meta_index = GetMetaIndex(hdr->huffman_image_, hdr->huffman_xsize_,
                                      hdr->huffman_subsample_bits_, x, y);
  assert(meta_index < hdr->num_htree_groups_);
  return hdr->htree_groups_ + meta_index;
}

//------------------------------------------------------------------------------
// Main loop, with custom row-processing function

typedef void (*ProcessRowsFunc)(VP8LDecoder* const dec, int row);

static void ApplyInverseTransforms(VP8LDecoder* const dec, int num_rows,
                                   const uint32_t* const rows) {
  int n = dec->next_transform_;
  const int cache_pixs = dec->width_ * num_rows;
  const int start_row = dec->last_row_;
  const int end_row = start_row + num_rows;
  const uint32_t* rows_in = rows;
  uint32_t* const rows_out = dec->argb_cache_;

  // Inverse transforms.
  // TODO: most transforms only need to operate on the cropped region.
  memcpy(rows_out, rows_in, cache_pixs * sizeof(*rows_out));
  while (n-- > 0) {
    VP8LTransform* const transform = &dec->transforms_[n];
    VP8LInverseTransform(transform, start_row, end_row, rows_in, rows_out);
    rows_in = rows_out;
  }
}

// Special method for paletted alpha data.
static void ApplyInverseTransformsAlpha(VP8LDecoder* const dec, int num_rows,
                                        const uint8_t* const rows) {
  const int start_row = dec->last_row_;
  const int end_row = start_row + num_rows;
  const uint8_t* rows_in = rows;
  uint8_t* rows_out = (uint8_t*)dec->io_->opaque + dec->io_->width * start_row;
  VP8LTransform* const transform = &dec->transforms_[0];
  assert(dec->next_transform_ == 1);
  assert(transform->type_ == COLOR_INDEXING_TRANSFORM);
  VP8LColorIndexInverseTransformAlpha(transform, start_row, end_row, rows_in,
                                      rows_out);
}

// Processes (transforms, scales & color-converts) the rows decoded after the
// last call.
static void ProcessRows(VP8LDecoder* const dec, int row) {
  const uint32_t* const rows = dec->pixels_ + dec->width_ * dec->last_row_;
  const int num_rows = row - dec->last_row_;

  if (num_rows <= 0) return;  // Nothing to be done.
  ApplyInverseTransforms(dec, num_rows, rows);

  // Emit output.
  {
    VP8Io* const io = dec->io_;
    uint8_t* rows_data = (uint8_t*)dec->argb_cache_;
    const int in_stride = io->width * sizeof(uint32_t);  // in unit of RGBA
    if (!SetCropWindow(io, dec->last_row_, row, &rows_data, in_stride)) {
      // Nothing to output (this time).
    } else {
      const WebPDecBuffer* const output = dec->output_;
      if (output->colorspace < MODE_YUV) {  // convert to RGBA
        const WebPRGBABuffer* const buf = &output->u.RGBA;
        uint8_t* const rgba = buf->rgba + dec->last_out_row_ * buf->stride;
        const int num_rows_out = io->use_scaling ?
            EmitRescaledRowsRGBA(dec, rows_data, in_stride, io->mb_h,
                                 rgba, buf->stride) :
            EmitRows(output->colorspace, rows_data, in_stride,
                     io->mb_w, io->mb_h, rgba, buf->stride);
        // Update 'last_out_row_'.
        dec->last_out_row_ += num_rows_out;
      } else {                              // convert to YUVA
        dec->last_out_row_ = io->use_scaling ?
            EmitRescaledRowsYUVA(dec, rows_data, in_stride, io->mb_h) :
            EmitRowsYUVA(dec, rows_data, in_stride, io->mb_w, io->mb_h);
      }
      assert(dec->last_out_row_ <= output->height);
    }
  }

  // Update 'last_row_'.
  dec->last_row_ = row;
  assert(dec->last_row_ <= dec->height_);
}

// Row-processing for the special case when alpha data contains only one
// transform (color indexing), and trivial non-green literals.
static int Is8bOptimizable(const VP8LMetadata* const hdr) {
  int i;
  if (hdr->color_cache_size_ > 0) return 0;
  // When the Huffman tree contains only one symbol, we can skip the
  // call to ReadSymbol() for red/blue/alpha channels.
  for (i = 0; i < hdr->num_htree_groups_; ++i) {
    const HuffmanTree* const htrees = hdr->htree_groups_[i].htrees_;
    if (htrees[RED].num_nodes_ > 1) return 0;
    if (htrees[BLUE].num_nodes_ > 1) return 0;
    if (htrees[ALPHA].num_nodes_ > 1) return 0;
  }
  return 1;
}
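
// In that case each red/blue/alpha tree codes a single symbol (typically
// built via the simple-code path of ReadHuffmanCode()), so reading it would
// consume no bits at all; DecodeAlphaData() therefore reads only the GREEN
// tree and stores one byte per pixel.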

static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int row) {
  const int num_rows = row - dec->last_row_;
  const uint8_t* const in =
      (uint8_t*)dec->pixels_ + dec->width_ * dec->last_row_;
  if (num_rows > 0) {
    ApplyInverseTransformsAlpha(dec, num_rows, in);
  }
  dec->last_row_ = dec->last_out_row_ = row;
}

static int DecodeAlphaData(VP8LDecoder* const dec, uint8_t* const data,
                           int width, int height, int last_row) {
  int ok = 1;
  int row = dec->last_pixel_ / width;
  int col = dec->last_pixel_ % width;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  const HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row);
  int pos = dec->last_pixel_;         // current position
  const int end = width * height;     // End of data
  const int last = width * last_row;  // Last pixel to decode
  const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES;
  const int mask = hdr->huffman_mask_;
  assert(htree_group != NULL);
  assert(pos < end);
  assert(last_row <= height);
  assert(Is8bOptimizable(hdr));

  while (!br->eos_ && pos < last) {
    int code;
    // Only update when changing tile.
    if ((col & mask) == 0) {
      htree_group = GetHtreeGroupForPos(hdr, col, row);
    }
    VP8LFillBitWindow(br);
    code = ReadSymbol(&htree_group->htrees_[GREEN], br);
    if (code < NUM_LITERAL_CODES) {  // Literal
      data[pos] = code;
      ++pos;
      ++col;
      if (col >= width) {
        col = 0;
        ++row;
        if (row % NUM_ARGB_CACHE_ROWS == 0) {
          ExtractPalettedAlphaRows(dec, row);
        }
      }
    } else if (code < len_code_limit) {  // Backward reference
      int dist_code, dist;
      const int length_sym = code - NUM_LITERAL_CODES;
      const int length = GetCopyLength(length_sym, br);
      const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br);
      VP8LFillBitWindow(br);
      dist_code = GetCopyDistance(dist_symbol, br);
      dist = PlaneCodeToDistance(width, dist_code);
      if (pos >= dist && end - pos >= length) {
        int i;
        for (i = 0; i < length; ++i) data[pos + i] = data[pos + i - dist];
      } else {
        ok = 0;
        goto End;
      }
      pos += length;
      col += length;
      while (col >= width) {
        col -= width;
        ++row;
        if (row % NUM_ARGB_CACHE_ROWS == 0) {
          ExtractPalettedAlphaRows(dec, row);
        }
      }
      if (pos < last && (col & mask)) {
        htree_group = GetHtreeGroupForPos(hdr, col, row);
      }
    } else {  // Not reached
      ok = 0;
      goto End;
    }
    ok = !br->error_;
    if (!ok) goto End;
  }
  // Process the remaining rows corresponding to last row-block.
  ExtractPalettedAlphaRows(dec, row);

 End:
  if (br->error_ || !ok || (br->eos_ && pos < end)) {
    ok = 0;
    dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED
                            : VP8_STATUS_BITSTREAM_ERROR;
  } else {
    dec->last_pixel_ = (int)pos;
    if (pos == end) dec->state_ = READ_DATA;
  }
  return ok;
}

static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data,
                           int width, int height, int last_row,
                           ProcessRowsFunc process_func) {
  int ok = 1;
  int row = dec->last_pixel_ / width;
  int col = dec->last_pixel_ % width;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row);
  uint32_t* src = data + dec->last_pixel_;
  uint32_t* last_cached = src;
  uint32_t* const src_end = data + width * height;     // End of data
  uint32_t* const src_last = data + width * last_row;  // Last pixel to decode
  const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES;
  const int color_cache_limit = len_code_limit + hdr->color_cache_size_;
  VP8LColorCache* const color_cache =
      (hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL;
  const int mask = hdr->huffman_mask_;
  assert(htree_group != NULL);
  assert(src < src_end);
  assert(src_last <= src_end);

  while (!br->eos_ && src < src_last) {
    int code;
    // Only update when changing tile. Note we could use this test:
    // if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed
    // but that's actually slower and needs storing the previous col/row.
    if ((col & mask) == 0) {
      htree_group = GetHtreeGroupForPos(hdr, col, row);
    }
    VP8LFillBitWindow(br);
    code = ReadSymbol(&htree_group->htrees_[GREEN], br);
    if (code < NUM_LITERAL_CODES) {  // Literal
      int red, green, blue, alpha;
      red = ReadSymbol(&htree_group->htrees_[RED], br);
      green = code;
      VP8LFillBitWindow(br);
      blue = ReadSymbol(&htree_group->htrees_[BLUE], br);
      alpha = ReadSymbol(&htree_group->htrees_[ALPHA], br);
      *src = ((uint32_t)alpha << 24) | (red << 16) | (green << 8) | blue;
 AdvanceByOne:
      ++src;
      ++col;
      if (col >= width) {
        col = 0;
        ++row;
        if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) {
          process_func(dec, row);
        }
        if (color_cache != NULL) {
          while (last_cached < src) {
            VP8LColorCacheInsert(color_cache, *last_cached++);
          }
        }
      }
    } else if (code < len_code_limit) {  // Backward reference
      int dist_code, dist;
      const int length_sym = code - NUM_LITERAL_CODES;
      const int length = GetCopyLength(length_sym, br);
      const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br);
      VP8LFillBitWindow(br);
      dist_code = GetCopyDistance(dist_symbol, br);
      dist = PlaneCodeToDistance(width, dist_code);
      if (src - data < (ptrdiff_t)dist || src_end - src < (ptrdiff_t)length) {
        ok = 0;
        goto End;
      } else {
        int i;
        for (i = 0; i < length; ++i) src[i] = src[i - dist];
        src += length;
      }
      col += length;
      while (col >= width) {
        col -= width;
        ++row;
        if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) {
          process_func(dec, row);
        }
      }
      if (src < src_last) {
        if (col & mask) htree_group = GetHtreeGroupForPos(hdr, col, row);
        if (color_cache != NULL) {
          while (last_cached < src) {
            VP8LColorCacheInsert(color_cache, *last_cached++);
          }
        }
      }
    } else if (code < color_cache_limit) {  // Color cache
      const int key = code - len_code_limit;
      assert(color_cache != NULL);
      while (last_cached < src) {
        VP8LColorCacheInsert(color_cache, *last_cached++);
      }
      *src = VP8LColorCacheLookup(color_cache, key);
      goto AdvanceByOne;
    } else {  // Not reached
      ok = 0;
      goto End;
    }
    ok = !br->error_;
    if (!ok) goto End;
  }
  // Process the remaining rows corresponding to last row-block.
  if (process_func != NULL) process_func(dec, row);

 End:
  if (br->error_ || !ok || (br->eos_ && src < src_end)) {
    ok = 0;
    dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED
                            : VP8_STATUS_BITSTREAM_ERROR;
  } else {
    dec->last_pixel_ = (int)(src - data);
    if (src == src_end) dec->state_ = READ_DATA;
  }
  return ok;
}
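
// Symbol ranges used above: codes 0..NUM_LITERAL_CODES-1 are green literal
// values, the next NUM_LENGTH_CODES codes are LZ77 length prefixes (so
// len_code_limit == 280 with the default 256 + 24), and codes from
// len_code_limit up to color_cache_limit select a color-cache entry.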

// -----------------------------------------------------------------------------
// VP8LTransform

static void ClearTransform(VP8LTransform* const transform) {
  WebPSafeFree(transform->data_);
  transform->data_ = NULL;
}

// For security reasons, we need to remap the color map to span
// the total possible bundled values, and not just num_colors.
static int ExpandColorMap(int num_colors, VP8LTransform* const transform) {
  int i;
  const int final_num_colors = 1 << (8 >> transform->bits_);
  uint32_t* const new_color_map =
      (uint32_t*)WebPSafeMalloc((uint64_t)final_num_colors,
                                sizeof(*new_color_map));
  if (new_color_map == NULL) {
    return 0;
  } else {
    uint8_t* const data = (uint8_t*)transform->data_;
    uint8_t* const new_data = (uint8_t*)new_color_map;
    new_color_map[0] = transform->data_[0];
    for (i = 4; i < 4 * num_colors; ++i) {
      // Equivalent to AddPixelEq(), on a byte-basis.
      new_data[i] = (data[i] + new_data[i - 4]) & 0xff;
    }
    for (; i < 4 * final_num_colors; ++i)
      new_data[i] = 0;  // black tail.
    WebPSafeFree(transform->data_);
    transform->data_ = new_color_map;
  }
  return 1;
}
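
// Example: with transform->bits_ == 2 each packed byte carries 2-bit indices,
// so up to 1 << (8 >> 2) == 4 distinct values can appear and the map is
// expanded to 4 entries (256 entries when bits_ == 0). Entries are also
// delta-decoded per byte channel: each color adds to the previous one, as
// done in the loop above.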

static int ReadTransform(int* const xsize, int const* ysize,
                         VP8LDecoder* const dec) {
  int ok = 1;
  VP8LBitReader* const br = &dec->br_;
  VP8LTransform* transform = &dec->transforms_[dec->next_transform_];
  const VP8LImageTransformType type =
      (VP8LImageTransformType)VP8LReadBits(br, 2);

  // Each transform type can only be present once in the stream.
  if (dec->transforms_seen_ & (1U << type)) {
    return 0;  // Already there, let's not accept the second same transform.
  }
  dec->transforms_seen_ |= (1U << type);

  transform->type_ = type;
  transform->xsize_ = *xsize;
  transform->ysize_ = *ysize;
  transform->data_ = NULL;
  ++dec->next_transform_;
  assert(dec->next_transform_ <= NUM_TRANSFORMS);

  switch (type) {
    case PREDICTOR_TRANSFORM:
    case CROSS_COLOR_TRANSFORM:
      transform->bits_ = VP8LReadBits(br, 3) + 2;
      ok = DecodeImageStream(VP8LSubSampleSize(transform->xsize_,
                                               transform->bits_),
                             VP8LSubSampleSize(transform->ysize_,
                                               transform->bits_),
                             0, dec, &transform->data_);
      break;
    case COLOR_INDEXING_TRANSFORM: {
      const int num_colors = VP8LReadBits(br, 8) + 1;
      const int bits = (num_colors > 16) ? 0
                     : (num_colors > 4) ? 1
                     : (num_colors > 2) ? 2
                     : 3;
      *xsize = VP8LSubSampleSize(transform->xsize_, bits);
      transform->bits_ = bits;
      ok = DecodeImageStream(num_colors, 1, 0, dec, &transform->data_);
      ok = ok && ExpandColorMap(num_colors, transform);
      break;
    }
    case SUBTRACT_GREEN:
      break;
    default:
      assert(0);    // can't happen
      break;
  }

  return ok;
}
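
// Packing example for COLOR_INDEXING_TRANSFORM: num_colors == 5 gives
// bits == 1, i.e. two 4-bit palette indices per byte, and the coded image
// width shrinks to VP8LSubSampleSize(xsize, 1) == (xsize + 1) >> 1.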

// -----------------------------------------------------------------------------
// VP8LMetadata

static void InitMetadata(VP8LMetadata* const hdr) {
  assert(hdr);
  memset(hdr, 0, sizeof(*hdr));
}

static void ClearMetadata(VP8LMetadata* const hdr) {
  assert(hdr);

  WebPSafeFree(hdr->huffman_image_);
  VP8LHtreeGroupsFree(hdr->htree_groups_, hdr->num_htree_groups_);
  VP8LColorCacheClear(&hdr->color_cache_);
  InitMetadata(hdr);
}

// -----------------------------------------------------------------------------
// VP8LDecoder

VP8LDecoder* VP8LNew(void) {
  VP8LDecoder* const dec = (VP8LDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
  if (dec == NULL) return NULL;
  dec->status_ = VP8_STATUS_OK;
  dec->action_ = READ_DIM;
  dec->state_ = READ_DIM;

  VP8LDspInit();  // Init critical function pointers.

  return dec;
}

void VP8LClear(VP8LDecoder* const dec) {
  int i;
  if (dec == NULL) return;
  ClearMetadata(&dec->hdr_);

  WebPSafeFree(dec->pixels_);
  dec->pixels_ = NULL;
  for (i = 0; i < dec->next_transform_; ++i) {
    ClearTransform(&dec->transforms_[i]);
  }
  dec->next_transform_ = 0;
  dec->transforms_seen_ = 0;

  WebPSafeFree(dec->rescaler_memory);
  dec->rescaler_memory = NULL;

  dec->output_ = NULL;   // leave no trace behind
}

void VP8LDelete(VP8LDecoder* const dec) {
  if (dec != NULL) {
    VP8LClear(dec);
    WebPSafeFree(dec);
  }
}

static void UpdateDecoder(VP8LDecoder* const dec, int width, int height) {
  VP8LMetadata* const hdr = &dec->hdr_;
  const int num_bits = hdr->huffman_subsample_bits_;
  dec->width_ = width;
  dec->height_ = height;

  hdr->huffman_xsize_ = VP8LSubSampleSize(width, num_bits);
  hdr->huffman_mask_ = (num_bits == 0) ? ~0 : (1 << num_bits) - 1;
}
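
// huffman_mask_ example: with huffman_subsample_bits_ == 3 the mask is 7, so
// the (col & mask) == 0 test in the decode loops re-fetches the htree group
// every 8 columns, i.e. once per meta-Huffman tile.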

static int DecodeImageStream(int xsize, int ysize,
                             int is_level0,
                             VP8LDecoder* const dec,
                             uint32_t** const decoded_data) {
  int ok = 1;
  int transform_xsize = xsize;
  int transform_ysize = ysize;
  VP8LBitReader* const br = &dec->br_;
  VP8LMetadata* const hdr = &dec->hdr_;
  uint32_t* data = NULL;
  int color_cache_bits = 0;

  // Read the transforms (may recurse).
  if (is_level0) {
    while (ok && VP8LReadBits(br, 1)) {
      ok = ReadTransform(&transform_xsize, &transform_ysize, dec);
    }
  }

  // Color cache
  if (ok && VP8LReadBits(br, 1)) {
    color_cache_bits = VP8LReadBits(br, 4);
    ok = (color_cache_bits >= 1 && color_cache_bits <= MAX_CACHE_BITS);
    if (!ok) {
      dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
      goto End;
    }
  }

  // Read the Huffman codes (may recurse).
  ok = ok && ReadHuffmanCodes(dec, transform_xsize, transform_ysize,
                              color_cache_bits, is_level0);
  if (!ok) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    goto End;
  }

  // Finish setting up the color-cache
  if (color_cache_bits > 0) {
    hdr->color_cache_size_ = 1 << color_cache_bits;
    if (!VP8LColorCacheInit(&hdr->color_cache_, color_cache_bits)) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  } else {
    hdr->color_cache_size_ = 0;
  }
  UpdateDecoder(dec, transform_xsize, transform_ysize);

  if (is_level0) {   // level 0 complete
    dec->state_ = READ_HDR;
    goto End;
  }

  {
    const uint64_t total_size = (uint64_t)transform_xsize * transform_ysize;
    data = (uint32_t*)WebPSafeMalloc(total_size, sizeof(*data));
    if (data == NULL) {
      dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
      ok = 0;
      goto End;
    }
  }

  // Use the Huffman trees to decode the LZ77 encoded data.
  ok = DecodeImageData(dec, data, transform_xsize, transform_ysize,
                       transform_ysize, NULL);
  ok = ok && !br->error_;

 End:
  if (!ok) {
    WebPSafeFree(data);
    ClearMetadata(hdr);
    // If not enough data (br.eos_) resulted in BITSTREAM_ERROR, update the
    // status appropriately.
    if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR && dec->br_.eos_) {
      dec->status_ = VP8_STATUS_SUSPENDED;
    }
  } else {
    if (decoded_data != NULL) {
      *decoded_data = data;
    } else {
      // We allocate image data in this function only for transforms. At level
      // 0 (that is: not the transforms), we shouldn't have allocated anything.
      assert(data == NULL);
      assert(is_level0);
    }
    dec->last_pixel_ = 0;  // Reset for future DECODE_DATA_FUNC() calls.
    if (!is_level0) ClearMetadata(hdr);  // Clean up temporary data behind.
  }
  return ok;
}

//------------------------------------------------------------------------------
// Allocate internal buffers dec->pixels_ and dec->argb_cache_.
static int AllocateInternalBuffers32b(VP8LDecoder* const dec, int final_width) {
  const uint64_t num_pixels = (uint64_t)dec->width_ * dec->height_;
  // Scratch buffer corresponding to top-prediction row for transforming the
  // first row in the row-blocks. Not needed for paletted alpha.
  const uint64_t cache_top_pixels = (uint16_t)final_width;
  // Scratch buffer for temporary BGRA storage. Not needed for paletted alpha.
  const uint64_t cache_pixels = (uint64_t)final_width * NUM_ARGB_CACHE_ROWS;
  const uint64_t total_num_pixels =
      num_pixels + cache_top_pixels + cache_pixels;

  assert(dec->width_ <= final_width);
  dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint32_t));
  if (dec->pixels_ == NULL) {
    dec->argb_cache_ = NULL;    // for sanity check
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    return 0;
  }
  dec->argb_cache_ = dec->pixels_ + num_pixels + cache_top_pixels;
  return 1;
}

static int AllocateInternalBuffers8b(VP8LDecoder* const dec) {
  const uint64_t total_num_pixels = (uint64_t)dec->width_ * dec->height_;
  dec->argb_cache_ = NULL;    // for sanity check
  dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint8_t));
  if (dec->pixels_ == NULL) {
    dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
    return 0;
  }
  return 1;
}

//------------------------------------------------------------------------------

// Special row-processing that only stores the alpha data.
static void ExtractAlphaRows(VP8LDecoder* const dec, int row) {
  const int num_rows = row - dec->last_row_;
  const uint32_t* const in = dec->pixels_ + dec->width_ * dec->last_row_;

  if (num_rows <= 0) return;  // Nothing to be done.
  ApplyInverseTransforms(dec, num_rows, in);

  // Extract alpha (which is stored in the green plane).
  {
    const int width = dec->io_->width;      // the final width (!= dec->width_)
    const int cache_pixs = width * num_rows;
    uint8_t* const dst = (uint8_t*)dec->io_->opaque + width * dec->last_row_;
    const uint32_t* const src = dec->argb_cache_;
    int i;
    for (i = 0; i < cache_pixs; ++i) dst[i] = (src[i] >> 8) & 0xff;
  }
  dec->last_row_ = dec->last_out_row_ = row;
}

int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec,
                          const uint8_t* const data, size_t data_size,
                          uint8_t* const output) {
  int ok = 0;
  VP8LDecoder* dec;
  VP8Io* io;
  assert(alph_dec != NULL);
  alph_dec->vp8l_dec_ = VP8LNew();
  if (alph_dec->vp8l_dec_ == NULL) return 0;
  dec = alph_dec->vp8l_dec_;

  dec->width_ = alph_dec->width_;
  dec->height_ = alph_dec->height_;
  dec->io_ = &alph_dec->io_;
  io = dec->io_;

  VP8InitIo(io);
  WebPInitCustomIo(NULL, io);    // Just a sanity Init. io won't be used.
  io->opaque = output;
  io->width = alph_dec->width_;
  io->height = alph_dec->height_;

  dec->status_ = VP8_STATUS_OK;
  VP8LInitBitReader(&dec->br_, data, data_size);

  dec->action_ = READ_HDR;
  if (!DecodeImageStream(alph_dec->width_, alph_dec->height_, 1, dec, NULL)) {
    goto Err;
  }

  // Special case: if alpha data uses only the color indexing transform and
  // doesn't use a color cache (a frequent case), we will use the
  // DecodeAlphaData() method that only needs an allocation of 1 byte per
  // pixel (alpha channel).
  if (dec->next_transform_ == 1 &&
      dec->transforms_[0].type_ == COLOR_INDEXING_TRANSFORM &&
      Is8bOptimizable(&dec->hdr_)) {
    alph_dec->use_8b_decode = 1;
    ok = AllocateInternalBuffers8b(dec);
  } else {
    // Allocate internal buffers (note that dec->width_ may have changed here).
    alph_dec->use_8b_decode = 0;
    ok = AllocateInternalBuffers32b(dec, alph_dec->width_);
  }

  if (!ok) goto Err;

  dec->action_ = READ_DATA;
  return 1;

 Err:
  VP8LDelete(alph_dec->vp8l_dec_);
  alph_dec->vp8l_dec_ = NULL;
  return 0;
}

int VP8LDecodeAlphaImageStream(ALPHDecoder* const alph_dec, int last_row) {
  VP8LDecoder* const dec = alph_dec->vp8l_dec_;
  assert(dec != NULL);
  assert(dec->action_ == READ_DATA);
  assert(last_row <= dec->height_);

  if (dec->last_pixel_ == dec->width_ * dec->height_) {
    return 1;  // done
  }

  // Decode (with special row processing).
  return alph_dec->use_8b_decode ?
      DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_, dec->height_,
                      last_row) :
      DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
                      last_row, ExtractAlphaRows);
}

//------------------------------------------------------------------------------

int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io) {
  int width, height, has_alpha;

  if (dec == NULL) return 0;
  if (io == NULL) {
    dec->status_ = VP8_STATUS_INVALID_PARAM;
    return 0;
  }

  dec->io_ = io;
  dec->status_ = VP8_STATUS_OK;
  VP8LInitBitReader(&dec->br_, io->data, io->data_size);
  if (!ReadImageInfo(&dec->br_, &width, &height, &has_alpha)) {
    dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
    goto Error;
  }
  dec->state_ = READ_DIM;
  io->width = width;
  io->height = height;

  dec->action_ = READ_HDR;
  if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Error;
  return 1;

 Error:
  VP8LClear(dec);
  assert(dec->status_ != VP8_STATUS_OK);
  return 0;
}

int VP8LDecodeImage(VP8LDecoder* const dec) {
  VP8Io* io = NULL;
  WebPDecParams* params = NULL;

  // Sanity checks.
  if (dec == NULL) return 0;

  io = dec->io_;
  assert(io != NULL);
  params = (WebPDecParams*)io->opaque;
  assert(params != NULL);
  dec->output_ = params->output;
  assert(dec->output_ != NULL);

  // Initialization.
  if (!WebPIoInitFromOptions(params->options, io, MODE_BGRA)) {
    dec->status_ = VP8_STATUS_INVALID_PARAM;
    goto Err;
  }

  if (!AllocateInternalBuffers32b(dec, io->width)) goto Err;

  if (io->use_scaling && !AllocateAndInitRescaler(dec, io)) goto Err;

  if (io->use_scaling || WebPIsPremultipliedMode(dec->output_->colorspace)) {
    // need the alpha-multiply functions for premultiplied output or rescaling
    WebPInitAlphaProcessing();
  }

  // Decode.
  dec->action_ = READ_DATA;
  if (!DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
                       dec->height_, ProcessRows)) {
    goto Err;
  }

  // Cleanup.
  params->last_y = dec->last_out_row_;
  VP8LClear(dec);
  return 1;

 Err:
  VP8LClear(dec);
  assert(dec->status_ != VP8_STATUS_OK);
  return 0;
}

//------------------------------------------------------------------------------