1 /*
2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
31 #include "avcodec.h"
32 #include "bitstream.h"
33 #include "dsputil.h"
34 #include "mathops.h"
36 #define VLC_BITS 11
38 #ifdef WORDS_BIGENDIAN
39 #define B 3
40 #define G 2
41 #define R 1
42 #else
43 #define B 0
44 #define G 1
45 #define R 2
46 #endif
48 typedef enum Predictor{
49 LEFT= 0,
50 PLANE,
51 MEDIAN,
52 } Predictor;
54 typedef struct HYuvContext{
55 AVCodecContext *avctx;
56 Predictor predictor;
57 GetBitContext gb;
58 PutBitContext pb;
59 int interlaced;
60 int decorrelate;
61 int bitstream_bpp;
62 int version;
63 int yuy2; //use yuy2 instead of 422P
64 int bgr32; //use bgr32 instead of bgr24
65 int width, height;
66 int flags;
67 int context;
68 int picture_number;
69 int last_slice_end;
70 uint8_t *temp[3];
71 uint64_t stats[3][256];
72 uint8_t len[3][256];
73 uint32_t bits[3][256];
74 uint32_t pix_bgr_map[1<<VLC_BITS];
75 VLC vlc[6]; //Y,U,V,YY,YU,YV
76 AVFrame picture;
77 uint8_t *bitstream_buffer;
78 unsigned int bitstream_buffer_size;
79 DSPContext dsp;
80 }HYuvContext;
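/* Built-in Huffman tables for streams that carry no table data (see
 * read_old_huffman_tables()): the classic_shift_* arrays are run-length coded
 * code lengths in the same format parsed by read_len_table(), and the
 * classic_add_* arrays hold the corresponding code bits. */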
82 static const unsigned char classic_shift_luma[] = {
83 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
84 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
85 69,68, 0
88 static const unsigned char classic_shift_chroma[] = {
89 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
90 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
91 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
94 static const unsigned char classic_add_luma[256] = {
95 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
96 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
97 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
98 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
99 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
100 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
101 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
102 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
103 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
104 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
105 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
106 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
107 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
108 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
109 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
110 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
113 static const unsigned char classic_add_chroma[256] = {
114 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
115 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
116 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
117 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
118 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
119 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
120 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
121 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
122 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
123 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
124 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
125 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
126 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
127 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
128 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
129 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
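/* Decoder-side predictors: each add_*_prediction() helper turns one row of
 * residuals back into pixel values, carrying the running predictor state
 * (the left pixel, plus the top-left pixel in the median case) across calls. */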
132 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
133 int i;
135 for(i=0; i<w-1; i++){
136 acc+= src[i];
137 dst[i]= acc;
138 i++;
139 acc+= src[i];
140 dst[i]= acc;
143 for(; i<w; i++){
144 acc+= src[i];
145 dst[i]= acc;
148 return acc;
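/* Median prediction: each pixel is predicted as the median of the left
 * neighbour, the top neighbour (src1[i]) and left + top - top-left (mod 256),
 * then the coded residual diff[i] is added.  *left and *left_top carry the
 * predictor state into the next call. */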
151 static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
152 int i;
153 uint8_t l, lt;
155 l= *left;
156 lt= *left_top;
158 for(i=0; i<w; i++){
159 l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
160 lt= src1[i];
161 dst[i]= l;
164 *left= l;
165 *left_top= lt;
168 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
169 int i;
170 int r,g,b;
171 r= *red;
172 g= *green;
173 b= *blue;
175 for(i=0; i<w; i++){
176 b+= src[4*i+B];
177 g+= src[4*i+G];
178 r+= src[4*i+R];
180 dst[4*i+B]= b;
181 dst[4*i+G]= g;
182 dst[4*i+R]= r;
185 *red= r;
186 *green= g;
187 *blue= b;
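/* Encoder-side predictors: the sub_*_prediction() helpers compute left-predicted
 * residuals.  The first few samples are handled with scalar code (they depend on
 * the incoming "left" value); the rest of the row goes through dsp.diff_bytes(). */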
190 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
191 int i;
192 if(w<32){
193 for(i=0; i<w; i++){
194 const int temp= src[i];
195 dst[i]= temp - left;
196 left= temp;
198 return left;
199 }else{
200 for(i=0; i<16; i++){
201 const int temp= src[i];
202 dst[i]= temp - left;
203 left= temp;
205 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
206 return src[w-1];
210 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
211 int i;
212 int r,g,b;
213 r= *red;
214 g= *green;
215 b= *blue;
216 for(i=0; i<FFMIN(w,4); i++){
217 const int rt= src[i*4+R];
218 const int gt= src[i*4+G];
219 const int bt= src[i*4+B];
220 dst[i*4+R]= rt - r;
221 dst[i*4+G]= gt - g;
222 dst[i*4+B]= bt - b;
223 r = rt;
224 g = gt;
225 b = bt;
227 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
228 *red= src[(w-1)*4+R];
229 *green= src[(w-1)*4+G];
230 *blue= src[(w-1)*4+B];
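/* Code-length tables are stored run-length coded: each record is a 3-bit repeat
 * count (0 means an explicit 8-bit count follows) and a 5-bit code length, e.g.
 * the record (repeat=4, val=7) emits four entries of length 7.  Records are read
 * until all 256 symbols have a length. */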
233 static void read_len_table(uint8_t *dst, GetBitContext *gb){
234 int i, val, repeat;
236 for(i=0; i<256;){
237 repeat= get_bits(gb, 3);
238 val = get_bits(gb, 5);
239 if(repeat==0)
240 repeat= get_bits(gb, 8);
241 //printf("%d %d\n", val, repeat);
242 while (repeat-- && i < 256) // don't overrun dst on a malformed table
243 dst[i++] = val;
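/* Build canonical code bits from a length table: codes are assigned from the
 * longest length down to the shortest, halving the running value at each length
 * step; an odd running value at any step means the lengths do not describe a
 * valid prefix code. */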
247 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
248 int len, index;
249 uint32_t bits=0;
251 for(len=32; len>0; len--){
252 for(index=0; index<256; index++){
253 if(len_table[index]==len)
254 dst[index]= bits++;
256 if(bits & 1){
257 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
258 return -1;
260 bits >>= 1;
262 return 0;
265 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
266 typedef struct {
267 uint64_t val;
268 int name;
269 } HeapElem;
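/* Sift-down step for a min-heap ordered by HeapElem.val, used below to
 * repeatedly extract the two smallest symbol counts. */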
271 static void heap_sift(HeapElem *h, int root, int size)
273 while(root*2+1 < size) {
274 int child = root*2+1;
275 if(child < size-1 && h[child].val > h[child+1].val)
276 child++;
277 if(h[root].val > h[child].val) {
278 FFSWAP(HeapElem, h[root], h[child]);
279 root = child;
280 } else
281 break;
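/* Derive code lengths from the symbol counts with the usual Huffman construction
 * on a min-heap (merge the two smallest nodes until one remains).  The counts are
 * biased by "offset"; if any resulting length reaches 32 bits the whole pass is
 * repeated with a doubled offset, which flattens the distribution until every
 * length fits. */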
285 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
286 HeapElem h[size];
287 int up[2*size];
288 int len[2*size];
289 int offset, i, next;
291 for(offset=1; ; offset<<=1){
292 for(i=0; i<size; i++){
293 h[i].name = i;
294 h[i].val = (stats[i] << 8) + offset;
296 for(i=size/2-1; i>=0; i--)
297 heap_sift(h, i, size);
299 for(next=size; next<size*2-1; next++){
300 // merge the two smallest entries and put the merged node back into the heap
301 uint64_t min1v = h[0].val;
302 up[h[0].name] = next;
303 h[0].val = INT64_MAX;
304 heap_sift(h, 0, size);
305 up[h[0].name] = next;
306 h[0].name = next;
307 h[0].val += min1v;
308 heap_sift(h, 0, size);
311 len[2*size-2] = 0;
312 for(i=2*size-3; i>=size; i--)
313 len[i] = len[up[i]] + 1;
314 for(i=0; i<size; i++) {
315 dst[i] = len[up[i]] + 1;
316 if(dst[i] >= 32) break;
318 if(i==size) break;
321 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
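/* Build the joint VLCs: for YUV, vlc[3..5] pair a luma symbol with a second
 * luma/chroma symbol; for RGB, vlc[3] covers a whole (optionally decorrelated)
 * B,G,R triple mapped through pix_bgr_map.  A pair/triple is added only when its
 * combined code length fits in VLC_BITS, so it can be decoded with a single
 * get_vlc2() call.  The packed symbol value 0xffff is skipped because READ_2PIX()
 * uses it to mean "not in the joint table, fall back to the per-plane tables". */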
323 static void generate_joint_tables(HYuvContext *s){
324 uint16_t symbols[1<<VLC_BITS];
325 uint16_t bits[1<<VLC_BITS];
326 uint8_t len[1<<VLC_BITS];
327 if(s->bitstream_bpp < 24){
328 int p, i, y, u;
329 for(p=0; p<3; p++){
330 for(i=y=0; y<256; y++){
331 int len0 = s->len[0][y];
332 int limit = VLC_BITS - len0;
333 if(limit <= 0)
334 continue;
335 for(u=0; u<256; u++){
336 int len1 = s->len[p][u];
337 if(len1 > limit)
338 continue;
339 len[i] = len0 + len1;
340 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
341 symbols[i] = (y<<8) + u;
342 if(symbols[i] != 0xffff) // reserved to mean "invalid"
343 i++;
346 free_vlc(&s->vlc[3+p]);
347 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
349 }else{
350 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
351 int i, b, g, r, code;
352 int p0 = s->decorrelate;
353 int p1 = !s->decorrelate;
354 // restrict the range to +/-16 because that's pretty much guaranteed to
355 // cover all the combinations that fit in 11 bits total, and it doesn't
356 // matter if we miss a few rare codes.
357 for(i=0, g=-16; g<16; g++){
358 int len0 = s->len[p0][g&255];
359 int limit0 = VLC_BITS - len0;
360 if(limit0 < 2)
361 continue;
362 for(b=-16; b<16; b++){
363 int len1 = s->len[p1][b&255];
364 int limit1 = limit0 - len1;
365 if(limit1 < 1)
366 continue;
367 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
368 for(r=-16; r<16; r++){
369 int len2 = s->len[2][r&255];
370 if(len2 > limit1)
371 continue;
372 len[i] = len0 + len1 + len2;
373 bits[i] = (code << len2) + s->bits[2][r&255];
374 if(s->decorrelate){
375 map[i][G] = g;
376 map[i][B] = g+b;
377 map[i][R] = g+r;
378 }else{
379 map[i][B] = g;
380 map[i][G] = b;
381 map[i][R] = r;
383 i++;
387 free_vlc(&s->vlc[3]);
388 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
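/* Parse the three run-length coded length tables (Y, U, V) from src, rebuild the
 * per-plane code bits and VLCs, regenerate the joint tables, and return the
 * number of bytes consumed.  Version-2 streams store the tables in extradata, or
 * at the start of every frame when adaptive context is enabled. */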
392 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
393 GetBitContext gb;
394 int i;
396 init_get_bits(&gb, src, length*8);
398 for(i=0; i<3; i++){
399 read_len_table(s->len[i], &gb);
401 if(generate_bits_table(s->bits[i], s->len[i])<0){
402 return -1;
404 #if 0
405 for(j=0; j<256; j++){
406 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
408 #endif
409 free_vlc(&s->vlc[i]);
410 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
413 generate_joint_tables(s);
415 return (get_bits_count(&gb)+7)/8;
418 static int read_old_huffman_tables(HYuvContext *s){
419 #if 1
420 GetBitContext gb;
421 int i;
423 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
424 read_len_table(s->len[0], &gb);
425 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
426 read_len_table(s->len[1], &gb);
428 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
429 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
431 if(s->bitstream_bpp >= 24){
432 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
433 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
435 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
436 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
438 for(i=0; i<3; i++){
439 free_vlc(&s->vlc[i]);
440 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
443 generate_joint_tables(s);
445 return 0;
446 #else
447 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported\n");
448 return -1;
449 #endif
452 static void alloc_temp(HYuvContext *s){
453 int i;
455 if(s->bitstream_bpp<24){
456 for(i=0; i<3; i++){
457 s->temp[i]= av_malloc(s->width + 16);
459 }else{
460 for(i=0; i<2; i++){
461 s->temp[i]= av_malloc(4*s->width + 16);
466 static int common_init(AVCodecContext *avctx){
467 HYuvContext *s = avctx->priv_data;
469 s->avctx= avctx;
470 s->flags= avctx->flags;
472 dsputil_init(&s->dsp, avctx);
474 s->width= avctx->width;
475 s->height= avctx->height;
476 assert(s->width>0 && s->height>0);
478 return 0;
481 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
482 static av_cold int decode_init(AVCodecContext *avctx)
484 HYuvContext *s = avctx->priv_data;
486 common_init(avctx);
487 memset(s->vlc, 0, 3*sizeof(VLC));
489 avctx->coded_frame= &s->picture;
490 s->interlaced= s->height > 288;
492 s->bgr32=1;
493 //if(avctx->extradata)
494 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
495 if(avctx->extradata_size){
496 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
497 s->version=1; // do such files exist at all?
498 else
499 s->version=2;
500 }else
501 s->version=0;
503 if(s->version==2){
504 int method, interlace;
506 method= ((uint8_t*)avctx->extradata)[0];
507 s->decorrelate= method&64 ? 1 : 0;
508 s->predictor= method&63;
509 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
510 if(s->bitstream_bpp==0)
511 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
512 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
513 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
514 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
516 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
517 return -1;
518 }else{
519 switch(avctx->bits_per_coded_sample&7){
520 case 1:
521 s->predictor= LEFT;
522 s->decorrelate= 0;
523 break;
524 case 2:
525 s->predictor= LEFT;
526 s->decorrelate= 1;
527 break;
528 case 3:
529 s->predictor= PLANE;
530 s->decorrelate= avctx->bits_per_coded_sample >= 24;
531 break;
532 case 4:
533 s->predictor= MEDIAN;
534 s->decorrelate= 0;
535 break;
536 default:
537 s->predictor= LEFT; //OLD
538 s->decorrelate= 0;
539 break;
541 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
542 s->context= 0;
544 if(read_old_huffman_tables(s) < 0)
545 return -1;
548 switch(s->bitstream_bpp){
549 case 12:
550 avctx->pix_fmt = PIX_FMT_YUV420P;
551 break;
552 case 16:
553 if(s->yuy2){
554 avctx->pix_fmt = PIX_FMT_YUYV422;
555 }else{
556 avctx->pix_fmt = PIX_FMT_YUV422P;
558 break;
559 case 24:
560 case 32:
561 if(s->bgr32){
562 avctx->pix_fmt = PIX_FMT_RGB32;
563 }else{
564 avctx->pix_fmt = PIX_FMT_BGR24;
566 break;
567 default:
568 assert(0);
571 alloc_temp(s);
573 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
575 return 0;
577 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
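/* Inverse of read_len_table(): run-length encode one 256-entry length table into
 * buf and return the number of bytes written. */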
580 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
581 int i;
582 int index= 0;
584 for(i=0; i<256;){
585 int val= len[i];
586 int repeat=0;
588 for(; i<256 && len[i]==val && repeat<255; i++)
589 repeat++;
591 assert(val < 32 && val >0 && repeat<256 && repeat>0);
592 if(repeat>7){
593 buf[index++]= val;
594 buf[index++]= repeat;
595 }else{
596 buf[index++]= val | (repeat<<5);
600 return index;
603 static av_cold int encode_init(AVCodecContext *avctx)
605 HYuvContext *s = avctx->priv_data;
606 int i, j;
608 common_init(avctx);
610 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
611 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
612 s->version=2;
614 avctx->coded_frame= &s->picture;
616 switch(avctx->pix_fmt){
617 case PIX_FMT_YUV420P:
618 s->bitstream_bpp= 12;
619 break;
620 case PIX_FMT_YUV422P:
621 s->bitstream_bpp= 16;
622 break;
623 case PIX_FMT_RGB32:
624 s->bitstream_bpp= 24;
625 break;
626 default:
627 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
628 return -1;
630 avctx->bits_per_coded_sample= s->bitstream_bpp;
631 s->decorrelate= s->bitstream_bpp >= 24;
632 s->predictor= avctx->prediction_method;
633 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
634 if(avctx->context_model==1){
635 s->context= avctx->context_model;
636 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
637 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
638 return -1;
640 }else s->context= 0;
642 if(avctx->codec->id==CODEC_ID_HUFFYUV){
643 if(avctx->pix_fmt==PIX_FMT_YUV420P){
644 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
645 return -1;
647 if(avctx->context_model){
648 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
649 return -1;
651 if(s->interlaced != ( s->height > 288 ))
652 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
655 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
656 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
657 return -1;
660 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
661 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
662 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
663 if(s->context)
664 ((uint8_t*)avctx->extradata)[2]|= 0x40;
665 ((uint8_t*)avctx->extradata)[3]= 0;
666 s->avctx->extradata_size= 4;
668 if(avctx->stats_in){
669 char *p= avctx->stats_in;
671 for(i=0; i<3; i++)
672 for(j=0; j<256; j++)
673 s->stats[i][j]= 1;
675 for(;;){
676 for(i=0; i<3; i++){
677 char *next;
679 for(j=0; j<256; j++){
680 s->stats[i][j]+= strtol(p, &next, 0);
681 if(next==p) return -1;
682 p=next;
685 if(p[0]==0 || p[1]==0 || p[2]==0) break;
687 }else{
688 for(i=0; i<3; i++)
689 for(j=0; j<256; j++){
690 int d= FFMIN(j, 256-j);
692 s->stats[i][j]= 100000000/(d+1);
696 for(i=0; i<3; i++){
697 generate_len_table(s->len[i], s->stats[i], 256);
699 if(generate_bits_table(s->bits[i], s->len[i])<0){
700 return -1;
703 s->avctx->extradata_size+=
704 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
707 if(s->context){
708 for(i=0; i<3; i++){
709 int pels = s->width*s->height / (i?40:10);
710 for(j=0; j<256; j++){
711 int d= FFMIN(j, 256-j);
712 s->stats[i][j]= pels/(d+1);
715 }else{
716 for(i=0; i<3; i++)
717 for(j=0; j<256; j++)
718 s->stats[i][j]= 0;
721 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
723 alloc_temp(s);
725 s->picture_number=0;
727 return 0;
729 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
731 /* TODO instead of restarting the read when the code isn't in the first level
732 * of the joint table, jump into the 2nd level of the individual table. */
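/* READ_2PIX(): try the joint table first; a hit returns both symbols packed as
 * (dst0<<8)|dst1, while the reserved value 0xffff signals a miss and the two
 * symbols are re-read one at a time from the per-plane tables. */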
733 #define READ_2PIX(dst0, dst1, plane1){\
734 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
735 if(code != 0xffff){\
736 dst0 = code>>8;\
737 dst1 = code;\
738 }else{\
739 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
740 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
744 static void decode_422_bitstream(HYuvContext *s, int count){
745 int i;
747 count/=2;
749 for(i=0; i<count; i++){
750 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
751 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
755 static void decode_gray_bitstream(HYuvContext *s, int count){
756 int i;
758 count/=2;
760 for(i=0; i<count; i++){
761 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
765 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
766 static int encode_422_bitstream(HYuvContext *s, int count){
767 int i;
769 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
770 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
771 return -1;
774 #define LOAD4\
775 int y0 = s->temp[0][2*i];\
776 int y1 = s->temp[0][2*i+1];\
777 int u0 = s->temp[1][i];\
778 int v0 = s->temp[2][i];
780 count/=2;
781 if(s->flags&CODEC_FLAG_PASS1){
782 for(i=0; i<count; i++){
783 LOAD4;
784 s->stats[0][y0]++;
785 s->stats[1][u0]++;
786 s->stats[0][y1]++;
787 s->stats[2][v0]++;
790 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
791 return 0;
792 if(s->context){
793 for(i=0; i<count; i++){
794 LOAD4;
795 s->stats[0][y0]++;
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
797 s->stats[1][u0]++;
798 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
799 s->stats[0][y1]++;
800 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
801 s->stats[2][v0]++;
802 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
804 }else{
805 for(i=0; i<count; i++){
806 LOAD4;
807 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
808 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
809 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
810 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
813 return 0;
816 static int encode_gray_bitstream(HYuvContext *s, int count){
817 int i;
819 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
820 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
821 return -1;
824 #define LOAD2\
825 int y0 = s->temp[0][2*i];\
826 int y1 = s->temp[0][2*i+1];
827 #define STAT2\
828 s->stats[0][y0]++;\
829 s->stats[0][y1]++;
830 #define WRITE2\
831 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
832 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
834 count/=2;
835 if(s->flags&CODEC_FLAG_PASS1){
836 for(i=0; i<count; i++){
837 LOAD2;
838 STAT2;
841 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
842 return 0;
844 if(s->context){
845 for(i=0; i<count; i++){
846 LOAD2;
847 STAT2;
848 WRITE2;
850 }else{
851 for(i=0; i<count; i++){
852 LOAD2;
853 WRITE2;
856 return 0;
858 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
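/* RGB decode: each pixel is either fetched in one step through the joint table
 * (the code indexes pix_bgr_map), or read as three separate B,G,R codes.  With
 * decorrelate set, G is coded directly and B and R are coded as offsets from G.
 * For 32 bpp streams one extra (alpha) code is read and discarded. */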
860 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
861 int i;
862 for(i=0; i<count; i++){
863 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
864 if(code != -1){
865 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
866 }else if(decorrelate){
867 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
868 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
869 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
870 }else{
871 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
872 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
873 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
875 if(alpha)
876 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
880 static void decode_bgr_bitstream(HYuvContext *s, int count){
881 if(s->decorrelate){
882 if(s->bitstream_bpp==24)
883 decode_bgr_1(s, count, 1, 0);
884 else
885 decode_bgr_1(s, count, 1, 1);
886 }else{
887 if(s->bitstream_bpp==24)
888 decode_bgr_1(s, count, 0, 0);
889 else
890 decode_bgr_1(s, count, 0, 1);
894 static int encode_bgr_bitstream(HYuvContext *s, int count){
895 int i;
897 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
898 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
899 return -1;
902 #define LOAD3\
903 int g= s->temp[0][4*i+G];\
904 int b= (s->temp[0][4*i+B] - g) & 0xff;\
905 int r= (s->temp[0][4*i+R] - g) & 0xff;
906 #define STAT3\
907 s->stats[0][b]++;\
908 s->stats[1][g]++;\
909 s->stats[2][r]++;
910 #define WRITE3\
911 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
912 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
913 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
915 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
916 for(i=0; i<count; i++){
917 LOAD3;
918 STAT3;
920 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
921 for(i=0; i<count; i++){
922 LOAD3;
923 STAT3;
924 WRITE3;
926 }else{
927 for(i=0; i<count; i++){
928 LOAD3;
929 WRITE3;
932 return 0;
935 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
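/* Hand the rows finished since the previous call to the user's draw_horiz_band()
 * callback; for 4:2:0 the chroma offset advances at half the luma rate. */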
936 static void draw_slice(HYuvContext *s, int y){
937 int h, cy;
938 int offset[4];
940 if(s->avctx->draw_horiz_band==NULL)
941 return;
943 h= y - s->last_slice_end;
944 y -= h;
946 if(s->bitstream_bpp==12){
947 cy= y>>1;
948 }else{
949 cy= y;
952 offset[0] = s->picture.linesize[0]*y;
953 offset[1] = s->picture.linesize[1]*cy;
954 offset[2] = s->picture.linesize[2]*cy;
955 offset[3] = 0;
956 emms_c();
958 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
960 s->last_slice_end= y + h;
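/* Frame decoding: the input is byte-swapped into bitstream_buffer, per-frame
 * Huffman tables are read first when adaptive context is on, the very first
 * pixel values are stored raw in the bitstream, and the rest of the frame is
 * reconstructed row by row according to the predictor (LEFT/PLANE/MEDIAN for
 * YUV; LEFT/PLANE for RGB, which is stored bottom-up). */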
963 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size){
964 HYuvContext *s = avctx->priv_data;
965 const int width= s->width;
966 const int width2= s->width>>1;
967 const int height= s->height;
968 int fake_ystride, fake_ustride, fake_vstride;
969 AVFrame * const p= &s->picture;
970 int table_size= 0;
972 AVFrame *picture = data;
974 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
976 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
978 if(p->data[0])
979 avctx->release_buffer(avctx, p);
981 p->reference= 0;
982 if(avctx->get_buffer(avctx, p) < 0){
983 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
984 return -1;
987 if(s->context){
988 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
989 if(table_size < 0)
990 return -1;
993 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
994 return -1;
996 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
998 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
999 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1000 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1002 s->last_slice_end= 0;
1004 if(s->bitstream_bpp<24){
1005 int y, cy;
1006 int lefty, leftu, leftv;
1007 int lefttopy, lefttopu, lefttopv;
1009 if(s->yuy2){
1010 p->data[0][3]= get_bits(&s->gb, 8);
1011 p->data[0][2]= get_bits(&s->gb, 8);
1012 p->data[0][1]= get_bits(&s->gb, 8);
1013 p->data[0][0]= get_bits(&s->gb, 8);
1015 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1016 return -1;
1017 }else{
1019 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1020 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1021 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1022 p->data[0][0]= get_bits(&s->gb, 8);
1024 switch(s->predictor){
1025 case LEFT:
1026 case PLANE:
1027 decode_422_bitstream(s, width-2);
1028 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1029 if(!(s->flags&CODEC_FLAG_GRAY)){
1030 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1031 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1034 for(cy=y=1; y<s->height; y++,cy++){
1035 uint8_t *ydst, *udst, *vdst;
1037 if(s->bitstream_bpp==12){
1038 decode_gray_bitstream(s, width);
1040 ydst= p->data[0] + p->linesize[0]*y;
1042 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1043 if(s->predictor == PLANE){
1044 if(y>s->interlaced)
1045 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1047 y++;
1048 if(y>=s->height) break;
1051 draw_slice(s, y);
1053 ydst= p->data[0] + p->linesize[0]*y;
1054 udst= p->data[1] + p->linesize[1]*cy;
1055 vdst= p->data[2] + p->linesize[2]*cy;
1057 decode_422_bitstream(s, width);
1058 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1059 if(!(s->flags&CODEC_FLAG_GRAY)){
1060 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
1061 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
1063 if(s->predictor == PLANE){
1064 if(cy>s->interlaced){
1065 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1066 if(!(s->flags&CODEC_FLAG_GRAY)){
1067 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1068 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1073 draw_slice(s, height);
1075 break;
1076 case MEDIAN:
1077 /* first line except first 2 pixels is left predicted */
1078 decode_422_bitstream(s, width-2);
1079 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1080 if(!(s->flags&CODEC_FLAG_GRAY)){
1081 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1082 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1085 cy=y=1;
1087 /* second line is left predicted for interlaced case */
1088 if(s->interlaced){
1089 decode_422_bitstream(s, width);
1090 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1091 if(!(s->flags&CODEC_FLAG_GRAY)){
1092 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1093 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1095 y++; cy++;
1098 /* next 4 pixels are left predicted too */
1099 decode_422_bitstream(s, 4);
1100 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1101 if(!(s->flags&CODEC_FLAG_GRAY)){
1102 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1103 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1106 /* next line except the first 4 pixels is median predicted */
1107 lefttopy= p->data[0][3];
1108 decode_422_bitstream(s, width-4);
1109 add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1110 if(!(s->flags&CODEC_FLAG_GRAY)){
1111 lefttopu= p->data[1][1];
1112 lefttopv= p->data[2][1];
1113 add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1114 add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1116 y++; cy++;
1118 for(; y<height; y++,cy++){
1119 uint8_t *ydst, *udst, *vdst;
1121 if(s->bitstream_bpp==12){
1122 while(2*cy > y){
1123 decode_gray_bitstream(s, width);
1124 ydst= p->data[0] + p->linesize[0]*y;
1125 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1126 y++;
1128 if(y>=height) break;
1130 draw_slice(s, y);
1132 decode_422_bitstream(s, width);
1134 ydst= p->data[0] + p->linesize[0]*y;
1135 udst= p->data[1] + p->linesize[1]*cy;
1136 vdst= p->data[2] + p->linesize[2]*cy;
1138 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1139 if(!(s->flags&CODEC_FLAG_GRAY)){
1140 add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1141 add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1145 draw_slice(s, height);
1146 break;
1149 }else{
1150 int y;
1151 int leftr, leftg, leftb;
1152 const int last_line= (height-1)*p->linesize[0];
1154 if(s->bitstream_bpp==32){
1155 skip_bits(&s->gb, 8);
1156 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1157 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1158 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1159 }else{
1160 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1161 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1162 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1163 skip_bits(&s->gb, 8);
1166 if(s->bgr32){
1167 switch(s->predictor){
1168 case LEFT:
1169 case PLANE:
1170 decode_bgr_bitstream(s, width-1);
1171 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
1173 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1174 decode_bgr_bitstream(s, width);
1176 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1177 if(s->predictor == PLANE){
1178 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1179 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1180 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1184 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1185 break;
1186 default:
1187 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1189 }else{
1191 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1192 return -1;
1195 emms_c();
1197 *picture= *p;
1198 *data_size = sizeof(AVFrame);
1200 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1202 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1204 static int common_end(HYuvContext *s){
1205 int i;
1207 for(i=0; i<3; i++){
1208 av_freep(&s->temp[i]);
1210 return 0;
1213 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1214 static av_cold int decode_end(AVCodecContext *avctx)
1216 HYuvContext *s = avctx->priv_data;
1217 int i;
1219 common_end(s);
1220 av_freep(&s->bitstream_buffer);
1222 for(i=0; i<6; i++){
1223 free_vlc(&s->vlc[i]);
1226 return 0;
1228 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1230 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
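/* Frame encoding: with adaptive context the per-frame length tables derived from
 * the running stats are stored first and the stats are halved; the first raw
 * samples are written, residuals follow according to the predictor, the output
 * is byte-swapped to match the bitstream, and in pass 1 the accumulated stats
 * are dumped to stats_out every 32 frames. */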
1231 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1232 HYuvContext *s = avctx->priv_data;
1233 AVFrame *pict = data;
1234 const int width= s->width;
1235 const int width2= s->width>>1;
1236 const int height= s->height;
1237 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1238 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1239 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1240 AVFrame * const p= &s->picture;
1241 int i, j, size=0;
1243 *p = *pict;
1244 p->pict_type= FF_I_TYPE;
1245 p->key_frame= 1;
1247 if(s->context){
1248 for(i=0; i<3; i++){
1249 generate_len_table(s->len[i], s->stats[i], 256);
1250 if(generate_bits_table(s->bits[i], s->len[i])<0)
1251 return -1;
1252 size+= store_table(s, s->len[i], &buf[size]);
1255 for(i=0; i<3; i++)
1256 for(j=0; j<256; j++)
1257 s->stats[i][j] >>= 1;
1260 init_put_bits(&s->pb, buf+size, buf_size-size);
1262 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1263 int lefty, leftu, leftv, y, cy;
1265 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1266 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1267 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1268 put_bits(&s->pb, 8, p->data[0][0]);
1270 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1271 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1272 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1274 encode_422_bitstream(s, width-2);
1276 if(s->predictor==MEDIAN){
1277 int lefttopy, lefttopu, lefttopv;
1278 cy=y=1;
1279 if(s->interlaced){
1280 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1281 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1282 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1284 encode_422_bitstream(s, width);
1285 y++; cy++;
1288 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1289 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1290 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1292 encode_422_bitstream(s, 4);
1294 lefttopy= p->data[0][3];
1295 lefttopu= p->data[1][1];
1296 lefttopv= p->data[2][1];
1297 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1298 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1299 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1300 encode_422_bitstream(s, width-4);
1301 y++; cy++;
1303 for(; y<height; y++,cy++){
1304 uint8_t *ydst, *udst, *vdst;
1306 if(s->bitstream_bpp==12){
1307 while(2*cy > y){
1308 ydst= p->data[0] + p->linesize[0]*y;
1309 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1310 encode_gray_bitstream(s, width);
1311 y++;
1313 if(y>=height) break;
1315 ydst= p->data[0] + p->linesize[0]*y;
1316 udst= p->data[1] + p->linesize[1]*cy;
1317 vdst= p->data[2] + p->linesize[2]*cy;
1319 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1320 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1321 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1323 encode_422_bitstream(s, width);
1325 }else{
1326 for(cy=y=1; y<height; y++,cy++){
1327 uint8_t *ydst, *udst, *vdst;
1329 /* encode a luma only line & y++ */
1330 if(s->bitstream_bpp==12){
1331 ydst= p->data[0] + p->linesize[0]*y;
1333 if(s->predictor == PLANE && s->interlaced < y){
1334 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1336 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1337 }else{
1338 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1340 encode_gray_bitstream(s, width);
1341 y++;
1342 if(y>=height) break;
1345 ydst= p->data[0] + p->linesize[0]*y;
1346 udst= p->data[1] + p->linesize[1]*cy;
1347 vdst= p->data[2] + p->linesize[2]*cy;
1349 if(s->predictor == PLANE && s->interlaced < cy){
1350 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1351 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1352 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1354 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1355 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1356 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1357 }else{
1358 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1359 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1360 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1363 encode_422_bitstream(s, width);
1366 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1367 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1368 const int stride = -p->linesize[0];
1369 const int fake_stride = -fake_ystride;
1370 int y;
1371 int leftr, leftg, leftb;
1373 put_bits(&s->pb, 8, leftr= data[R]);
1374 put_bits(&s->pb, 8, leftg= data[G]);
1375 put_bits(&s->pb, 8, leftb= data[B]);
1376 put_bits(&s->pb, 8, 0);
1378 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1379 encode_bgr_bitstream(s, width-1);
1381 for(y=1; y<s->height; y++){
1382 uint8_t *dst = data + y*stride;
1383 if(s->predictor == PLANE && s->interlaced < y){
1384 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1385 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1386 }else{
1387 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1389 encode_bgr_bitstream(s, width);
1391 }else{
1392 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1394 emms_c();
1396 size+= (put_bits_count(&s->pb)+31)/8;
1397 size/= 4;
1399 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1400 int j;
1401 char *p= avctx->stats_out;
1402 char *end= p + 1024*30;
1403 for(i=0; i<3; i++){
1404 for(j=0; j<256; j++){
1405 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1406 p+= strlen(p);
1407 s->stats[i][j]= 0;
1409 snprintf(p, end-p, "\n");
1410 p++;
1412 } else
1413 avctx->stats_out[0] = '\0';
1414 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1415 flush_put_bits(&s->pb);
1416 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1419 s->picture_number++;
1421 return size*4;
1424 static av_cold int encode_end(AVCodecContext *avctx)
1426 HYuvContext *s = avctx->priv_data;
1428 common_end(s);
1430 av_freep(&avctx->extradata);
1431 av_freep(&avctx->stats_out);
1433 return 0;
1435 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1437 #if CONFIG_HUFFYUV_DECODER
1438 AVCodec huffyuv_decoder = {
1439 "huffyuv",
1440 CODEC_TYPE_VIDEO,
1441 CODEC_ID_HUFFYUV,
1442 sizeof(HYuvContext),
1443 decode_init,
1444 NULL,
1445 decode_end,
1446 decode_frame,
1447 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1448 NULL,
1449 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1451 #endif
1453 #if CONFIG_FFVHUFF_DECODER
1454 AVCodec ffvhuff_decoder = {
1455 "ffvhuff",
1456 CODEC_TYPE_VIDEO,
1457 CODEC_ID_FFVHUFF,
1458 sizeof(HYuvContext),
1459 decode_init,
1460 NULL,
1461 decode_end,
1462 decode_frame,
1463 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1464 NULL,
1465 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1467 #endif
1469 #if CONFIG_HUFFYUV_ENCODER
1470 AVCodec huffyuv_encoder = {
1471 "huffyuv",
1472 CODEC_TYPE_VIDEO,
1473 CODEC_ID_HUFFYUV,
1474 sizeof(HYuvContext),
1475 encode_init,
1476 encode_frame,
1477 encode_end,
1478 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1479 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1481 #endif
1483 #if CONFIG_FFVHUFF_ENCODER
1484 AVCodec ffvhuff_encoder = {
1485 "ffvhuff",
1486 CODEC_TYPE_VIDEO,
1487 CODEC_ID_FFVHUFF,
1488 sizeof(HYuvContext),
1489 encode_init,
1490 encode_frame,
1491 encode_end,
1492 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1493 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1495 #endif