/*
 * Copyright (C) 2003-2004 The FFmpeg project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * On2 VP3 Video Decoder
 *
 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 * For more information about the VP3 coding process, visit:
 *   http://wiki.multimedia.cx/index.php?title=On2_VP3
 *
 * Theora decoder by Alex Beregszaszi
 */
36 #include "libavutil/imgutils.h"
49 #define FRAGMENT_PIXELS 8
51 // FIXME split things out into their own arrays
52 typedef struct Vp3Fragment
{
54 uint8_t coding_method
;
58 #define SB_NOT_CODED 0
59 #define SB_PARTIALLY_CODED 1
60 #define SB_FULLY_CODED 2
62 // This is the maximum length of a single long bit run that can be encoded
63 // for superblock coding or block qps. Theora special-cases this to read a
64 // bit instead of flipping the current bit to allow for runs longer than 4129.
65 #define MAXIMUM_LONG_BIT_RUN 4129
67 #define MODE_INTER_NO_MV 0
69 #define MODE_INTER_PLUS_MV 2
70 #define MODE_INTER_LAST_MV 3
71 #define MODE_INTER_PRIOR_LAST 4
72 #define MODE_USING_GOLDEN 5
73 #define MODE_GOLDEN_MV 6
74 #define MODE_INTER_FOURMV 7
75 #define CODING_MODE_COUNT 8
77 /* special internal mode */
/* There are 6 preset schemes, plus a free-form scheme */
static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
    /* scheme 1: Last motion vector dominates */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    { MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    { MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
      MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTRA,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },
};
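
/* Each superblock covers a 4x4 group of 8x8-pixel fragments. The table below
 * gives the (x, y) offset of the i-th fragment visited within a superblock;
 * laid out on the 4x4 grid, the traversal order is:
 *
 *     0  1 14 15
 *     3  2 13 12
 *     4  7  8 11
 *     5  6  9 10
 */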
static const uint8_t hilbert_offset[16][2] = {
    { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
    { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
    { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
    { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
};
#define MIN_DEQUANT_VAL 2
typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
    int theora, theora_tables;
    int version;
    int width, height;
    int chroma_x_shift, chroma_y_shift;
    ThreadFrame golden_frame;
    ThreadFrame last_frame;
    ThreadFrame current_frame;
    int keyframe;
    uint8_t idct_permutation[64];
    uint8_t idct_scantable[64];
    HpelDSPContext hdsp;
    VideoDSPContext vdsp;
    VP3DSPContext vp3dsp;
    DECLARE_ALIGNED(16, int16_t, block)[64];
    int flipped_image;
    int last_slice_end;
    int skip_loop_filter;

    int qps[3];
    int nqps;
    int last_qps[3];

    int superblock_count;
    int y_superblock_width;
    int y_superblock_height;
    int y_superblock_count;
    int c_superblock_width;
    int c_superblock_height;
    int c_superblock_count;
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width[2];
    int fragment_height[2];

    Vp3Fragment *all_fragments;
    int fragment_start[3];
    int data_offset[3];

    int8_t (*motion_val[2])[2];

    uint16_t coded_dc_scale_factor[64];
    uint32_t coded_ac_scale_factor[64];
    uint8_t base_matrix[384][64];
    uint8_t qr_count[2][3];
    uint8_t qr_size[2][3][64];
    uint16_t qr_base[2][3][64];

    /**
     * This is a list of all tokens in bitstream order. Reordering takes place
     * by pulling from each level during IDCT. As a consequence, IDCT must be
     * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
     * otherwise. The 32 different tokens with up to 12 bits of extradata are
     * collapsed into 3 types, packed as follows:
     *   (from the low to high bits)
     *
     * 2 bits: type (0,1,2)
     *   0: EOB run, 14 bits for run length (12 needed)
     *   1: zero run, 7 bits for run length
     *                7 bits for the next coefficient (3 needed)
     *   2: coefficient, 14 bits (11 needed)
     *
     * Coefficients are signed, so are packed in the highest bits for automatic
     * sign extension.
     */
    int16_t *dct_tokens[3][64];
    int16_t *dct_tokens_base;
#define TOKEN_EOB(eob_run)              ((eob_run) << 2)
#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
#define TOKEN_COEFF(coeff)              (((coeff) << 2) + 2)
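/* Example: TOKEN_ZERO_RUN(-2, 3) = (-2 << 9) + (3 << 2) + 1 encodes "skip 3
 * zero coefficients, then store -2": type 1 in bits 0-1, the run length in
 * bits 2-8, and the signed coefficient in the high bits, where it
 * sign-extends for free when the 16-bit token is later shifted back down. */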
    /**
     * number of blocks that contain DCT coefficients at
     * the given level or higher
     */
    int num_coded_frags[3][64];
    int total_num_coded_frags;

    /* this is a list of indexes into the all_fragments array indicating
     * which of the fragments are coded */
    int *coded_fragment_list[3];

    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * otherwise fail */
    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]

    /* This table contains superblock_count * 16 entries. Each set of 16
     * numbers corresponds to the fragment indexes 0..15 of the superblock.
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_fragments;

    /* This is an array that indicates how a particular macroblock
     * is coded. */
    unsigned char *macroblock_coding;

    uint8_t *edge_emu_buffer;

    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint32_t huffman_table[80][32][2];

    uint8_t filter_limit_values[64];
    DECLARE_ALIGNED(8, int, bounding_values_array)[256 + 2];
} Vp3DecodeContext;
/************************************************************************
 * VP3 specific functions
 ************************************************************************/

static void vp3_decode_flush(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (s->golden_frame.f)
        ff_thread_release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.f)
        ff_thread_release_buffer(avctx, &s->last_frame);
    if (s->current_frame.f)
        ff_thread_release_buffer(avctx, &s->current_frame);
}
static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_freep(&s->superblock_coding);
    av_freep(&s->all_fragments);
    av_freep(&s->coded_fragment_list[0]);
    av_freep(&s->dct_tokens_base);
    av_freep(&s->superblock_fragments);
    av_freep(&s->macroblock_coding);
    av_freep(&s->motion_val[0]);
    av_freep(&s->motion_val[1]);
    av_freep(&s->edge_emu_buffer);

    /* release all frames */
    vp3_decode_flush(avctx);
    av_frame_free(&s->current_frame.f);
    av_frame_free(&s->last_frame.f);
    av_frame_free(&s->golden_frame.f);

    if (avctx->internal->is_copy)
        return 0;

    for (i = 0; i < 16; i++) {
        ff_free_vlc(&s->dc_vlc[i]);
        ff_free_vlc(&s->ac_vlc_1[i]);
        ff_free_vlc(&s->ac_vlc_2[i]);
        ff_free_vlc(&s->ac_vlc_3[i]);
        ff_free_vlc(&s->ac_vlc_4[i]);
    }

    ff_free_vlc(&s->superblock_run_length_vlc);
    ff_free_vlc(&s->fragment_run_length_vlc);
    ff_free_vlc(&s->mode_code_vlc);
    ff_free_vlc(&s->motion_vector_vlc);

    return 0;
}
/*
 * This function sets up all of the various blocks mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
 *
 * @return 0 if successful; returns 1 if *anything* went wrong.
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int sb_x, sb_y, plane;
    int x, y, i, j = 0;

    for (plane = 0; plane < 3; plane++) {
        int sb_width    = plane ? s->c_superblock_width
                                : s->y_superblock_width;
        int sb_height   = plane ? s->c_superblock_height
                                : s->y_superblock_height;
        int frag_width  = s->fragment_width[!!plane];
        int frag_height = s->fragment_height[!!plane];

        for (sb_y = 0; sb_y < sb_height; sb_y++)
            for (sb_x = 0; sb_x < sb_width; sb_x++)
                for (i = 0; i < 16; i++) {
                    x = 4 * sb_x + hilbert_offset[i][0];
                    y = 4 * sb_y + hilbert_offset[i][1];

                    if (x < frag_width && y < frag_height)
                        s->superblock_fragments[j++] = s->fragment_start[plane] +
                                                       y * frag_width + x;
                    else
                        s->superblock_fragments[j++] = -1;
                }
    }

    return 0; /* successful path out */
}
/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
    int i, plane, inter, qri, bmi, bmj, qistart;

    for (inter = 0; inter < 2; inter++) {
        for (plane = 0; plane < 3; plane++) {
            int sum = 0;
            for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
                sum += s->qr_size[inter][plane][qri];
                if (s->qps[qpi] <= sum)
                    break;
            }
            qistart = sum - s->qr_size[inter][plane][qri];
            bmi     = s->qr_base[inter][plane][qri];
            bmj     = s->qr_base[inter][plane][qri + 1];
            for (i = 0; i < 64; i++) {
                int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
                             2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
                             s->qr_size[inter][plane][qri]) /
                            (2 * s->qr_size[inter][plane][qri]);

                int qmin   = 8 << (inter + !i);
                int qscale = i ? ac_scale_factor : dc_scale_factor;

                s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
                    av_clip((qscale * coeff) / 100 * 4, qmin, 4096);
            }
            /* all DC coefficients use the same quant so as not to interfere
             * with DC prediction */
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }
}
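
/* The qmat[] computation above linearly interpolates, for the frame's quality
 * index qp, between the two base matrices bounding the current quant range
 * (qr_size = sum - qistart):
 *
 *   coeff ~= ((sum - qp) * base_matrix[bmi][i] +
 *             (qp - qistart) * base_matrix[bmj][i]) / qr_size
 *
 * For illustration (hypothetical numbers): qistart = 0, sum = 63,
 * base_matrix[bmi][i] = 20, base_matrix[bmj][i] = 10 and qp = 21 give
 * coeff = (2*42*20 + 2*21*10 + 63) / (2*63) = 2163 / 126 = 17, which is then
 * scaled by the DC/AC scale factor and clamped to [qmin, 4096]. */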
/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 *
 * The filter_limit_values may not be larger than 127.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values = s->bounding_values_array + 127;
    int filter_limit;
    int x;
    int value;

    filter_limit = s->filter_limit_values[s->qps[0]];
    assert(filter_limit < 128);

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[ x] =  x;
    }
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] =  value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
}
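
/* Example: with filter_limit = 4, bounding_values[d] is 0, 1, 2, 3 for
 * d = 0..3, peaks at 4 for d = 4, falls back to 3, 2, 1 for d = 5..7 and is 0
 * from d = 8 on (mirrored for negative d), so pixel deltas near the limit are
 * filtered at full strength while large deltas (real edges) pass untouched.
 * bounding_values[129..130] hold 2*filter_limit replicated in each byte,
 * presumably for the SIMD loop-filter implementations. */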
424 * This function unpacks all of the superblock/macroblock/fragment coding
425 * information from the bitstream.
427 static int unpack_superblocks(Vp3DecodeContext
*s
, GetBitContext
*gb
)
429 int superblock_starts
[3] = {
430 0, s
->u_superblock_start
, s
->v_superblock_start
433 int current_superblock
= 0;
435 int num_partial_superblocks
= 0;
438 int current_fragment
;
442 memset(s
->superblock_coding
, SB_FULLY_CODED
, s
->superblock_count
);
444 /* unpack the list of partially-coded superblocks */
445 bit
= get_bits1(gb
) ^ 1;
448 while (current_superblock
< s
->superblock_count
&& get_bits_left(gb
) > 0) {
449 if (s
->theora
&& current_run
== MAXIMUM_LONG_BIT_RUN
)
454 current_run
= get_vlc2(gb
, s
->superblock_run_length_vlc
.table
,
456 if (current_run
== 34)
457 current_run
+= get_bits(gb
, 12);
459 if (current_superblock
+ current_run
> s
->superblock_count
) {
460 av_log(s
->avctx
, AV_LOG_ERROR
,
461 "Invalid partially coded superblock run length\n");
465 memset(s
->superblock_coding
+ current_superblock
, bit
, current_run
);
467 current_superblock
+= current_run
;
469 num_partial_superblocks
+= current_run
;
472 /* unpack the list of fully coded superblocks if any of the blocks were
473 * not marked as partially coded in the previous step */
474 if (num_partial_superblocks
< s
->superblock_count
) {
475 int superblocks_decoded
= 0;
477 current_superblock
= 0;
478 bit
= get_bits1(gb
) ^ 1;
481 while (superblocks_decoded
< s
->superblock_count
- num_partial_superblocks
&&
482 get_bits_left(gb
) > 0) {
483 if (s
->theora
&& current_run
== MAXIMUM_LONG_BIT_RUN
)
488 current_run
= get_vlc2(gb
, s
->superblock_run_length_vlc
.table
,
490 if (current_run
== 34)
491 current_run
+= get_bits(gb
, 12);
493 for (j
= 0; j
< current_run
; current_superblock
++) {
494 if (current_superblock
>= s
->superblock_count
) {
495 av_log(s
->avctx
, AV_LOG_ERROR
,
496 "Invalid fully coded superblock run length\n");
500 /* skip any superblocks already marked as partially coded */
501 if (s
->superblock_coding
[current_superblock
] == SB_NOT_CODED
) {
502 s
->superblock_coding
[current_superblock
] = 2 * bit
;
506 superblocks_decoded
+= current_run
;
510 /* if there were partial blocks, initialize bitstream for
511 * unpacking fragment codings */
512 if (num_partial_superblocks
) {
515 /* toggle the bit because as soon as the first run length is
516 * fetched the bit will be toggled again */
521 /* figure out which fragments are coded; iterate through each
522 * superblock (all planes) */
523 s
->total_num_coded_frags
= 0;
524 memset(s
->macroblock_coding
, MODE_COPY
, s
->macroblock_count
);
526 for (plane
= 0; plane
< 3; plane
++) {
527 int sb_start
= superblock_starts
[plane
];
528 int sb_end
= sb_start
+ (plane
? s
->c_superblock_count
529 : s
->y_superblock_count
);
530 int num_coded_frags
= 0;
532 for (i
= sb_start
; i
< sb_end
&& get_bits_left(gb
) > 0; i
++) {
533 /* iterate through all 16 fragments in a superblock */
534 for (j
= 0; j
< 16; j
++) {
535 /* if the fragment is in bounds, check its coding status */
536 current_fragment
= s
->superblock_fragments
[i
* 16 + j
];
537 if (current_fragment
!= -1) {
538 int coded
= s
->superblock_coding
[i
];
540 if (s
->superblock_coding
[i
] == SB_PARTIALLY_CODED
) {
541 /* fragment may or may not be coded; this is the case
542 * that cares about the fragment coding runs */
543 if (current_run
-- == 0) {
545 current_run
= get_vlc2(gb
, s
->fragment_run_length_vlc
.table
, 5, 2);
551 /* default mode; actual mode will be decoded in
553 s
->all_fragments
[current_fragment
].coding_method
=
555 s
->coded_fragment_list
[plane
][num_coded_frags
++] =
558 /* not coded; copy this fragment from the prior frame */
559 s
->all_fragments
[current_fragment
].coding_method
=
565 s
->total_num_coded_frags
+= num_coded_frags
;
566 for (i
= 0; i
< 64; i
++)
567 s
->num_coded_frags
[plane
][i
] = num_coded_frags
;
569 s
->coded_fragment_list
[plane
+ 1] = s
->coded_fragment_list
[plane
] +
576 * This function unpacks all the coding mode data for individual macroblocks
577 * from the bitstream.
579 static int unpack_modes(Vp3DecodeContext
*s
, GetBitContext
*gb
)
581 int i
, j
, k
, sb_x
, sb_y
;
583 int current_macroblock
;
584 int current_fragment
;
586 int custom_mode_alphabet
[CODING_MODE_COUNT
];
591 for (i
= 0; i
< s
->fragment_count
; i
++)
592 s
->all_fragments
[i
].coding_method
= MODE_INTRA
;
594 /* fetch the mode coding scheme for this frame */
595 scheme
= get_bits(gb
, 3);
597 /* is it a custom coding scheme? */
599 for (i
= 0; i
< 8; i
++)
600 custom_mode_alphabet
[i
] = MODE_INTER_NO_MV
;
601 for (i
= 0; i
< 8; i
++)
602 custom_mode_alphabet
[get_bits(gb
, 3)] = i
;
603 alphabet
= custom_mode_alphabet
;
605 alphabet
= ModeAlphabet
[scheme
- 1];
607 /* iterate through all of the macroblocks that contain 1 or more
609 for (sb_y
= 0; sb_y
< s
->y_superblock_height
; sb_y
++) {
610 for (sb_x
= 0; sb_x
< s
->y_superblock_width
; sb_x
++) {
611 if (get_bits_left(gb
) <= 0)
614 for (j
= 0; j
< 4; j
++) {
615 int mb_x
= 2 * sb_x
+ (j
>> 1);
616 int mb_y
= 2 * sb_y
+ (((j
>> 1) + j
) & 1);
617 current_macroblock
= mb_y
* s
->macroblock_width
+ mb_x
;
619 if (mb_x
>= s
->macroblock_width
||
620 mb_y
>= s
->macroblock_height
)
#define BLOCK_X (2 * mb_x + (k & 1))
#define BLOCK_Y (2 * mb_y + (k >> 1))
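/* Example: macroblock (mb_x, mb_y) = (3, 2) covers the luma fragments at
 * (6, 4), (7, 4), (6, 5) and (7, 5); k = 0..3 walks them in raster order, so
 * the flat fragment index is BLOCK_Y * s->fragment_width[0] + BLOCK_X. */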
625 /* coding modes are only stored if the macroblock has
626 * at least one luma block coded, otherwise it must be
628 for (k
= 0; k
< 4; k
++) {
629 current_fragment
= BLOCK_Y
*
630 s
->fragment_width
[0] + BLOCK_X
;
631 if (s
->all_fragments
[current_fragment
].coding_method
!= MODE_COPY
)
635 s
->macroblock_coding
[current_macroblock
] = MODE_INTER_NO_MV
;
639 /* mode 7 means get 3 bits for each coding mode */
641 coding_mode
= get_bits(gb
, 3);
643 coding_mode
= alphabet
[get_vlc2(gb
, s
->mode_code_vlc
.table
, 3, 3)];
645 s
->macroblock_coding
[current_macroblock
] = coding_mode
;
646 for (k
= 0; k
< 4; k
++) {
647 frag
= s
->all_fragments
+ BLOCK_Y
* s
->fragment_width
[0] + BLOCK_X
;
648 if (frag
->coding_method
!= MODE_COPY
)
649 frag
->coding_method
= coding_mode
;
652 #define SET_CHROMA_MODES \
653 if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
654 frag[s->fragment_start[1]].coding_method = coding_mode; \
655 if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
656 frag[s->fragment_start[2]].coding_method = coding_mode;
658 if (s
->chroma_y_shift
) {
659 frag
= s
->all_fragments
+ mb_y
*
660 s
->fragment_width
[1] + mb_x
;
662 } else if (s
->chroma_x_shift
) {
663 frag
= s
->all_fragments
+
664 2 * mb_y
* s
->fragment_width
[1] + mb_x
;
665 for (k
= 0; k
< 2; k
++) {
667 frag
+= s
->fragment_width
[1];
670 for (k
= 0; k
< 4; k
++) {
671 frag
= s
->all_fragments
+
672 BLOCK_Y
* s
->fragment_width
[1] + BLOCK_X
;
685 * This function unpacks all the motion vectors for the individual
686 * macroblocks from the bitstream.
688 static int unpack_vectors(Vp3DecodeContext
*s
, GetBitContext
*gb
)
690 int j
, k
, sb_x
, sb_y
;
694 int last_motion_x
= 0;
695 int last_motion_y
= 0;
696 int prior_last_motion_x
= 0;
697 int prior_last_motion_y
= 0;
698 int current_macroblock
;
699 int current_fragment
;
705 /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
706 coding_mode
= get_bits1(gb
);
708 /* iterate through all of the macroblocks that contain 1 or more
710 for (sb_y
= 0; sb_y
< s
->y_superblock_height
; sb_y
++) {
711 for (sb_x
= 0; sb_x
< s
->y_superblock_width
; sb_x
++) {
712 if (get_bits_left(gb
) <= 0)
715 for (j
= 0; j
< 4; j
++) {
716 int mb_x
= 2 * sb_x
+ (j
>> 1);
717 int mb_y
= 2 * sb_y
+ (((j
>> 1) + j
) & 1);
718 current_macroblock
= mb_y
* s
->macroblock_width
+ mb_x
;
720 if (mb_x
>= s
->macroblock_width
||
721 mb_y
>= s
->macroblock_height
||
722 s
->macroblock_coding
[current_macroblock
] == MODE_COPY
)
725 switch (s
->macroblock_coding
[current_macroblock
]) {
726 case MODE_INTER_PLUS_MV
:
728 /* all 6 fragments use the same motion vector */
729 if (coding_mode
== 0) {
730 motion_x
[0] = motion_vector_table
[get_vlc2(gb
, s
->motion_vector_vlc
.table
, 6, 2)];
731 motion_y
[0] = motion_vector_table
[get_vlc2(gb
, s
->motion_vector_vlc
.table
, 6, 2)];
733 motion_x
[0] = fixed_motion_vector_table
[get_bits(gb
, 6)];
734 motion_y
[0] = fixed_motion_vector_table
[get_bits(gb
, 6)];
737 /* vector maintenance, only on MODE_INTER_PLUS_MV */
738 if (s
->macroblock_coding
[current_macroblock
] == MODE_INTER_PLUS_MV
) {
739 prior_last_motion_x
= last_motion_x
;
740 prior_last_motion_y
= last_motion_y
;
741 last_motion_x
= motion_x
[0];
742 last_motion_y
= motion_y
[0];
746 case MODE_INTER_FOURMV
:
747 /* vector maintenance */
748 prior_last_motion_x
= last_motion_x
;
749 prior_last_motion_y
= last_motion_y
;
751 /* fetch 4 vectors from the bitstream, one for each
752 * Y fragment, then average for the C fragment vectors */
753 for (k
= 0; k
< 4; k
++) {
754 current_fragment
= BLOCK_Y
* s
->fragment_width
[0] + BLOCK_X
;
755 if (s
->all_fragments
[current_fragment
].coding_method
!= MODE_COPY
) {
756 if (coding_mode
== 0) {
757 motion_x
[k
] = motion_vector_table
[get_vlc2(gb
, s
->motion_vector_vlc
.table
, 6, 2)];
758 motion_y
[k
] = motion_vector_table
[get_vlc2(gb
, s
->motion_vector_vlc
.table
, 6, 2)];
760 motion_x
[k
] = fixed_motion_vector_table
[get_bits(gb
, 6)];
761 motion_y
[k
] = fixed_motion_vector_table
[get_bits(gb
, 6)];
763 last_motion_x
= motion_x
[k
];
764 last_motion_y
= motion_y
[k
];
772 case MODE_INTER_LAST_MV
:
773 /* all 6 fragments use the last motion vector */
774 motion_x
[0] = last_motion_x
;
775 motion_y
[0] = last_motion_y
;
777 /* no vector maintenance (last vector remains the
781 case MODE_INTER_PRIOR_LAST
:
782 /* all 6 fragments use the motion vector prior to the
783 * last motion vector */
784 motion_x
[0] = prior_last_motion_x
;
785 motion_y
[0] = prior_last_motion_y
;
787 /* vector maintenance */
788 prior_last_motion_x
= last_motion_x
;
789 prior_last_motion_y
= last_motion_y
;
790 last_motion_x
= motion_x
[0];
791 last_motion_y
= motion_y
[0];
795 /* covers intra, inter without MV, golden without MV */
799 /* no vector maintenance */
803 /* assign the motion vectors to the correct fragments */
804 for (k
= 0; k
< 4; k
++) {
806 BLOCK_Y
* s
->fragment_width
[0] + BLOCK_X
;
807 if (s
->macroblock_coding
[current_macroblock
] == MODE_INTER_FOURMV
) {
808 s
->motion_val
[0][current_fragment
][0] = motion_x
[k
];
809 s
->motion_val
[0][current_fragment
][1] = motion_y
[k
];
811 s
->motion_val
[0][current_fragment
][0] = motion_x
[0];
812 s
->motion_val
[0][current_fragment
][1] = motion_y
[0];
                if (s->chroma_y_shift) {
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
                                             motion_x[2] + motion_x[3], 2);
                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
                                             motion_y[2] + motion_y[3], 2);
                    }
                    motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
                    motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
                    frag = mb_y * s->fragment_width[1] + mb_x;
                    s->motion_val[1][frag][0] = motion_x[0];
                    s->motion_val[1][frag][1] = motion_y[0];
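                    /* For 4:2:0, a 4MV macroblock first averages its four luma
                     * vectors; the (v >> 1) | (v & 1) step then halves the
                     * half-pel luma vector for the half-resolution chroma
                     * plane, forcing the half-pel bit on whenever the luma
                     * vector was odd so that no half-pel motion is lost. */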
828 } else if (s
->chroma_x_shift
) {
829 if (s
->macroblock_coding
[current_macroblock
] == MODE_INTER_FOURMV
) {
830 motion_x
[0] = RSHIFT(motion_x
[0] + motion_x
[1], 1);
831 motion_y
[0] = RSHIFT(motion_y
[0] + motion_y
[1], 1);
832 motion_x
[1] = RSHIFT(motion_x
[2] + motion_x
[3], 1);
833 motion_y
[1] = RSHIFT(motion_y
[2] + motion_y
[3], 1);
835 motion_x
[1] = motion_x
[0];
836 motion_y
[1] = motion_y
[0];
838 motion_x
[0] = (motion_x
[0] >> 1) | (motion_x
[0] & 1);
839 motion_x
[1] = (motion_x
[1] >> 1) | (motion_x
[1] & 1);
841 frag
= 2 * mb_y
* s
->fragment_width
[1] + mb_x
;
842 for (k
= 0; k
< 2; k
++) {
843 s
->motion_val
[1][frag
][0] = motion_x
[k
];
844 s
->motion_val
[1][frag
][1] = motion_y
[k
];
845 frag
+= s
->fragment_width
[1];
848 for (k
= 0; k
< 4; k
++) {
849 frag
= BLOCK_Y
* s
->fragment_width
[1] + BLOCK_X
;
850 if (s
->macroblock_coding
[current_macroblock
] == MODE_INTER_FOURMV
) {
851 s
->motion_val
[1][frag
][0] = motion_x
[k
];
852 s
->motion_val
[1][frag
][1] = motion_y
[k
];
854 s
->motion_val
[1][frag
][0] = motion_x
[0];
855 s
->motion_val
[1][frag
][1] = motion_y
[0];
866 static int unpack_block_qpis(Vp3DecodeContext
*s
, GetBitContext
*gb
)
868 int qpi
, i
, j
, bit
, run_length
, blocks_decoded
, num_blocks_at_qpi
;
869 int num_blocks
= s
->total_num_coded_frags
;
871 for (qpi
= 0; qpi
< s
->nqps
- 1 && num_blocks
> 0; qpi
++) {
872 i
= blocks_decoded
= num_blocks_at_qpi
= 0;
874 bit
= get_bits1(gb
) ^ 1;
878 if (run_length
== MAXIMUM_LONG_BIT_RUN
)
883 run_length
= get_vlc2(gb
, s
->superblock_run_length_vlc
.table
, 6, 2) + 1;
884 if (run_length
== 34)
885 run_length
+= get_bits(gb
, 12);
886 blocks_decoded
+= run_length
;
889 num_blocks_at_qpi
+= run_length
;
891 for (j
= 0; j
< run_length
; i
++) {
892 if (i
>= s
->total_num_coded_frags
)
895 if (s
->all_fragments
[s
->coded_fragment_list
[0][i
]].qpi
== qpi
) {
896 s
->all_fragments
[s
->coded_fragment_list
[0][i
]].qpi
+= bit
;
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}
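
/* Block-level qp indices are decoded as successive bit-planes: each pass
 * reads, with superblock-style run lengths, one bit per still-undecided coded
 * fragment saying whether it keeps the current qp index or is bumped to the
 * next one. Fragments that stay are finalized and subtracted from num_blocks
 * above, so the next pass only revisits the bumped ones (Theora allows at
 * most 3 qp values per frame). */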
/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
920 static int unpack_vlcs(Vp3DecodeContext
*s
, GetBitContext
*gb
,
921 VLC
*table
, int coeff_index
,
932 int num_coeffs
= s
->num_coded_frags
[plane
][coeff_index
];
933 int16_t *dct_tokens
= s
->dct_tokens
[plane
][coeff_index
];
935 /* local references to structure members to avoid repeated dereferences */
936 int *coded_fragment_list
= s
->coded_fragment_list
[plane
];
937 Vp3Fragment
*all_fragments
= s
->all_fragments
;
938 VLC_TYPE(*vlc_table
)[2] = table
->table
;
941 av_log(s
->avctx
, AV_LOG_ERROR
,
942 "Invalid number of coefficients at level %d\n", coeff_index
);
944 if (eob_run
> num_coeffs
) {
946 blocks_ended
= num_coeffs
;
947 eob_run
-= num_coeffs
;
950 blocks_ended
= eob_run
;
954 // insert fake EOB token to cover the split between planes or zzi
956 dct_tokens
[j
++] = blocks_ended
<< 2;
958 while (coeff_i
< num_coeffs
&& get_bits_left(gb
) > 0) {
959 /* decode a VLC into a token */
960 token
= get_vlc2(gb
, vlc_table
, 11, 3);
961 /* use the token to get a zero run, a coefficient, and an eob run */
962 if ((unsigned) token
<= 6U) {
963 eob_run
= eob_run_base
[token
];
964 if (eob_run_get_bits
[token
])
965 eob_run
+= get_bits(gb
, eob_run_get_bits
[token
]);
967 // record only the number of blocks ended in this plane,
968 // any spill will be recorded in the next plane.
969 if (eob_run
> num_coeffs
- coeff_i
) {
970 dct_tokens
[j
++] = TOKEN_EOB(num_coeffs
- coeff_i
);
971 blocks_ended
+= num_coeffs
- coeff_i
;
972 eob_run
-= num_coeffs
- coeff_i
;
973 coeff_i
= num_coeffs
;
975 dct_tokens
[j
++] = TOKEN_EOB(eob_run
);
976 blocks_ended
+= eob_run
;
980 } else if (token
>= 0) {
981 bits_to_get
= coeff_get_bits
[token
];
983 bits_to_get
= get_bits(gb
, bits_to_get
);
984 coeff
= coeff_tables
[token
][bits_to_get
];
986 zero_run
= zero_run_base
[token
];
987 if (zero_run_get_bits
[token
])
988 zero_run
+= get_bits(gb
, zero_run_get_bits
[token
]);
991 dct_tokens
[j
++] = TOKEN_ZERO_RUN(coeff
, zero_run
);
993 // Save DC into the fragment structure. DC prediction is
994 // done in raster order, so the actual DC can't be in with
995 // other tokens. We still need the token in dct_tokens[]
996 // however, or else the structure collapses on itself.
998 all_fragments
[coded_fragment_list
[coeff_i
]].dc
= coeff
;
1000 dct_tokens
[j
++] = TOKEN_COEFF(coeff
);
1003 if (coeff_index
+ zero_run
> 64) {
1004 av_log(s
->avctx
, AV_LOG_DEBUG
,
1005 "Invalid zero run of %d with %d coeffs left\n",
1006 zero_run
, 64 - coeff_index
);
1007 zero_run
= 64 - coeff_index
;
1010 // zero runs code multiple coefficients,
1011 // so don't try to decode coeffs for those higher levels
1012 for (i
= coeff_index
+ 1; i
<= coeff_index
+ zero_run
; i
++)
1013 s
->num_coded_frags
[plane
][i
]--;
1016 av_log(s
->avctx
, AV_LOG_ERROR
, "Invalid token %d\n", token
);
1021 if (blocks_ended
> s
->num_coded_frags
[plane
][coeff_index
])
1022 av_log(s
->avctx
, AV_LOG_ERROR
, "More blocks ended than coded!\n");
1024 // decrement the number of blocks that have higher coefficients for each
1025 // EOB run at this level
1027 for (i
= coeff_index
+ 1; i
< 64; i
++)
1028 s
->num_coded_frags
[plane
][i
] -= blocks_ended
;
1030 // setup the next buffer
1032 s
->dct_tokens
[plane
+ 1][coeff_index
] = dct_tokens
+ j
;
1033 else if (coeff_index
< 63)
1034 s
->dct_tokens
[0][coeff_index
+ 1] = dct_tokens
+ j
;
1039 static void reverse_dc_prediction(Vp3DecodeContext
*s
,
1042 int fragment_height
);
1044 * This function unpacks all of the DCT coefficient data from the
1047 static int unpack_dct_coeffs(Vp3DecodeContext
*s
, GetBitContext
*gb
)
1054 int residual_eob_run
= 0;
1058 s
->dct_tokens
[0][0] = s
->dct_tokens_base
;
1060 /* fetch the DC table indexes */
1061 dc_y_table
= get_bits(gb
, 4);
1062 dc_c_table
= get_bits(gb
, 4);
1064 /* unpack the Y plane DC coefficients */
1065 residual_eob_run
= unpack_vlcs(s
, gb
, &s
->dc_vlc
[dc_y_table
], 0,
1066 0, residual_eob_run
);
1067 if (residual_eob_run
< 0)
1068 return residual_eob_run
;
1070 /* reverse prediction of the Y-plane DC coefficients */
1071 reverse_dc_prediction(s
, 0, s
->fragment_width
[0], s
->fragment_height
[0]);
1073 /* unpack the C plane DC coefficients */
1074 residual_eob_run
= unpack_vlcs(s
, gb
, &s
->dc_vlc
[dc_c_table
], 0,
1075 1, residual_eob_run
);
1076 if (residual_eob_run
< 0)
1077 return residual_eob_run
;
1078 residual_eob_run
= unpack_vlcs(s
, gb
, &s
->dc_vlc
[dc_c_table
], 0,
1079 2, residual_eob_run
);
1080 if (residual_eob_run
< 0)
1081 return residual_eob_run
;
1083 /* reverse prediction of the C-plane DC coefficients */
1084 if (!(s
->avctx
->flags
& AV_CODEC_FLAG_GRAY
)) {
1085 reverse_dc_prediction(s
, s
->fragment_start
[1],
1086 s
->fragment_width
[1], s
->fragment_height
[1]);
1087 reverse_dc_prediction(s
, s
->fragment_start
[2],
1088 s
->fragment_width
[1], s
->fragment_height
[1]);
1091 /* fetch the AC table indexes */
1092 ac_y_table
= get_bits(gb
, 4);
1093 ac_c_table
= get_bits(gb
, 4);
1095 /* build tables of AC VLC tables */
1096 for (i
= 1; i
<= 5; i
++) {
1097 y_tables
[i
] = &s
->ac_vlc_1
[ac_y_table
];
1098 c_tables
[i
] = &s
->ac_vlc_1
[ac_c_table
];
1100 for (i
= 6; i
<= 14; i
++) {
1101 y_tables
[i
] = &s
->ac_vlc_2
[ac_y_table
];
1102 c_tables
[i
] = &s
->ac_vlc_2
[ac_c_table
];
1104 for (i
= 15; i
<= 27; i
++) {
1105 y_tables
[i
] = &s
->ac_vlc_3
[ac_y_table
];
1106 c_tables
[i
] = &s
->ac_vlc_3
[ac_c_table
];
1108 for (i
= 28; i
<= 63; i
++) {
1109 y_tables
[i
] = &s
->ac_vlc_4
[ac_y_table
];
1110 c_tables
[i
] = &s
->ac_vlc_4
[ac_c_table
];
1113 /* decode all AC coefficients */
1114 for (i
= 1; i
<= 63; i
++) {
1115 residual_eob_run
= unpack_vlcs(s
, gb
, y_tables
[i
], i
,
1116 0, residual_eob_run
);
1117 if (residual_eob_run
< 0)
1118 return residual_eob_run
;
1120 residual_eob_run
= unpack_vlcs(s
, gb
, c_tables
[i
], i
,
1121 1, residual_eob_run
);
1122 if (residual_eob_run
< 0)
1123 return residual_eob_run
;
1124 residual_eob_run
= unpack_vlcs(s
, gb
, c_tables
[i
], i
,
1125 2, residual_eob_run
);
1126 if (residual_eob_run
< 0)
1127 return residual_eob_run
;
1134 * This function reverses the DC prediction for each coded fragment in
1135 * the frame. Much of this function is adapted directly from the original
1138 #define COMPATIBLE_FRAME(x) \
1139 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1140 #define DC_COEFF(u) s->all_fragments[u].dc
1142 static void reverse_dc_prediction(Vp3DecodeContext
*s
,
1145 int fragment_height
)
1153 int i
= first_fragment
;
1157 /* DC values for the left, up-left, up, and up-right fragments */
1158 int vl
, vul
, vu
, vur
;
    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0 }, // unused
        {    0,   0,   0, 128 }, // PL
        {    0,   0, 128,   0 }, // PUR
        {    0,   0,  53,  75 }, // PUR|PL
        {    0, 128,   0,   0 }, // PU
        {    0,  64,   0,  64 }, // PU |PL
        {    0, 128,   0,   0 }, // PU |PUR
        {    0,   0,  53,  75 }, // PU |PUR|PL
        {  128,   0,   0,   0 }, // PUL
        {    0,   0,   0, 128 }, // PUL|PL
        {   64,   0,  64,   0 }, // PUL|PUR
        {    0,   0,  53,  75 }, // PUL|PUR|PL
        {    0, 128,   0,   0 }, // PUL|PU
        { -104, 116,   0, 116 }, // PUL|PU |PL
        {   24,  80,  24,   0 }, // PUL|PU |PUR
        { -104, 116,   0, 116 }  // PUL|PU |PUR|PL
    };
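    /* Example: transform = 7 (PU|PUR|PL available) predicts
     * (53 * vur + 75 * vl) / 128; transforms 13 and 15 use
     * (-104 * vul + 116 * vu + 116 * vl) / 128, which can overshoot the
     * neighbouring DC values, hence the outranging check applied below for
     * those two cases. */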

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1, /* MODE_INTER_NO_MV */
        0, /* MODE_INTRA */
        1, /* MODE_INTER_PLUS_MV */
        1, /* MODE_INTER_LAST_MV */
        1, /* MODE_INTER_PRIOR_LAST */
        2, /* MODE_USING_GOLDEN */
        2, /* MODE_GOLDEN_MV */
        1, /* MODE_INTER_FOURMV */
        3  /* MODE_COPY */
    };
1206 int current_frame_type
;
1208 /* there is a last DC predictor for each of the 3 frame types */
1221 /* for each fragment row... */
1222 for (y
= 0; y
< fragment_height
; y
++) {
1223 /* for each fragment in a row... */
1224 for (x
= 0; x
< fragment_width
; x
++, i
++) {
1226 /* reverse prediction if this block was coded */
1227 if (s
->all_fragments
[i
].coding_method
!= MODE_COPY
) {
1228 current_frame_type
=
1229 compatible_frame
[s
->all_fragments
[i
].coding_method
];
1235 if (COMPATIBLE_FRAME(l
))
1239 u
= i
- fragment_width
;
1241 if (COMPATIBLE_FRAME(u
))
1244 ul
= i
- fragment_width
- 1;
1246 if (COMPATIBLE_FRAME(ul
))
1249 if (x
+ 1 < fragment_width
) {
1250 ur
= i
- fragment_width
+ 1;
1252 if (COMPATIBLE_FRAME(ur
))
1257 if (transform
== 0) {
1258 /* if there were no fragments to predict from, use last
1260 predicted_dc
= last_dc
[current_frame_type
];
1262 /* apply the appropriate predictor transform */
1264 (predictor_transform
[transform
][0] * vul
) +
1265 (predictor_transform
[transform
][1] * vu
) +
1266 (predictor_transform
[transform
][2] * vur
) +
1267 (predictor_transform
[transform
][3] * vl
);
1269 predicted_dc
/= 128;
1271 /* check for outranging on the [ul u l] and
1272 * [ul u ur l] predictors */
1273 if ((transform
== 15) || (transform
== 13)) {
1274 if (FFABS(predicted_dc
- vu
) > 128)
1276 else if (FFABS(predicted_dc
- vl
) > 128)
1278 else if (FFABS(predicted_dc
- vul
) > 128)
1283 /* at long last, apply the predictor */
1284 DC_COEFF(i
) += predicted_dc
;
1286 last_dc
[current_frame_type
] = DC_COEFF(i
);
1292 static void apply_loop_filter(Vp3DecodeContext
*s
, int plane
,
1293 int ystart
, int yend
)
1296 int *bounding_values
= s
->bounding_values_array
+ 127;
1298 int width
= s
->fragment_width
[!!plane
];
1299 int height
= s
->fragment_height
[!!plane
];
1300 int fragment
= s
->fragment_start
[plane
] + ystart
* width
;
1301 ptrdiff_t stride
= s
->current_frame
.f
->linesize
[plane
];
1302 uint8_t *plane_data
= s
->current_frame
.f
->data
[plane
];
1303 if (!s
->flipped_image
)
1305 plane_data
+= s
->data_offset
[plane
] + 8 * ystart
* stride
;
1307 for (y
= ystart
; y
< yend
; y
++) {
1308 for (x
= 0; x
< width
; x
++) {
1309 /* This code basically just deblocks on the edges of coded blocks.
1310 * However, it has to be much more complicated because of the
1311 * brain damaged deblock ordering used in VP3/Theora. Order matters
1312 * because some pixels get filtered twice. */
1313 if (s
->all_fragments
[fragment
].coding_method
!= MODE_COPY
) {
1314 /* do not perform left edge filter for left columns frags */
1316 s
->vp3dsp
.h_loop_filter(
1318 stride
, bounding_values
);
1321 /* do not perform top edge filter for top row fragments */
1323 s
->vp3dsp
.v_loop_filter(
1325 stride
, bounding_values
);
1328 /* do not perform right edge filter for right column
1329 * fragments or if right fragment neighbor is also coded
1330 * in this frame (it will be filtered in next iteration) */
1331 if ((x
< width
- 1) &&
1332 (s
->all_fragments
[fragment
+ 1].coding_method
== MODE_COPY
)) {
1333 s
->vp3dsp
.h_loop_filter(
1334 plane_data
+ 8 * x
+ 8,
1335 stride
, bounding_values
);
1338 /* do not perform bottom edge filter for bottom row
1339 * fragments or if bottom fragment neighbor is also coded
1340 * in this frame (it will be filtered in the next row) */
1341 if ((y
< height
- 1) &&
1342 (s
->all_fragments
[fragment
+ width
].coding_method
== MODE_COPY
)) {
1343 s
->vp3dsp
.v_loop_filter(
1344 plane_data
+ 8 * x
+ 8 * stride
,
1345 stride
, bounding_values
);
1351 plane_data
+= 8 * stride
;
1356 * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1357 * for the next block in coding order
1359 static inline int vp3_dequant(Vp3DecodeContext
*s
, Vp3Fragment
*frag
,
1360 int plane
, int inter
, int16_t block
[64])
1362 int16_t *dequantizer
= s
->qmat
[frag
->qpi
][inter
][plane
];
1363 uint8_t *perm
= s
->idct_scantable
;
1367 int token
= *s
->dct_tokens
[plane
][i
];
1368 switch (token
& 3) {
1370 if (--token
< 4) // 0-3 are token types so the EOB run must now be 0
1371 s
->dct_tokens
[plane
][i
]++;
1373 *s
->dct_tokens
[plane
][i
] = token
& ~3;
1376 s
->dct_tokens
[plane
][i
]++;
1377 i
+= (token
>> 2) & 0x7f;
1379 av_log(s
->avctx
, AV_LOG_ERROR
, "Coefficient index overflow\n");
1382 block
[perm
[i
]] = (token
>> 9) * dequantizer
[perm
[i
]];
1386 block
[perm
[i
]] = (token
>> 2) * dequantizer
[perm
[i
]];
1387 s
->dct_tokens
[plane
][i
++]++;
1389 default: // shouldn't happen
1393 // return value is expected to be a valid level
1396 // the actual DC+prediction is in the fragment structure
1397 block
[0] = frag
->dc
* s
->qmat
[0][inter
][plane
][0];
1402 * called when all pixels up to row y are complete
1404 static void vp3_draw_horiz_band(Vp3DecodeContext
*s
, int y
)
1407 int offset
[AV_NUM_DATA_POINTERS
];
1409 if (HAVE_THREADS
&& s
->avctx
->active_thread_type
& FF_THREAD_FRAME
) {
1410 int y_flipped
= s
->flipped_image
? s
->height
- y
: y
;
1412 /* At the end of the frame, report INT_MAX instead of the height of
1413 * the frame. This makes the other threads' ff_thread_await_progress()
1414 * calls cheaper, because they don't have to clip their values. */
1415 ff_thread_report_progress(&s
->current_frame
,
1416 y_flipped
== s
->height
? INT_MAX
1421 if (!s
->avctx
->draw_horiz_band
)
1424 h
= y
- s
->last_slice_end
;
1425 s
->last_slice_end
= y
;
1428 if (!s
->flipped_image
)
1429 y
= s
->height
- y
- h
;
1431 cy
= y
>> s
->chroma_y_shift
;
1432 offset
[0] = s
->current_frame
.f
->linesize
[0] * y
;
1433 offset
[1] = s
->current_frame
.f
->linesize
[1] * cy
;
1434 offset
[2] = s
->current_frame
.f
->linesize
[2] * cy
;
1435 for (i
= 3; i
< AV_NUM_DATA_POINTERS
; i
++)
1439 s
->avctx
->draw_horiz_band(s
->avctx
, s
->current_frame
.f
, offset
, y
, 3, h
);
1443 * Wait for the reference frame of the current fragment.
1444 * The progress value is in luma pixel rows.
1446 static void await_reference_row(Vp3DecodeContext
*s
, Vp3Fragment
*fragment
,
1447 int motion_y
, int y
)
1449 ThreadFrame
*ref_frame
;
1451 int border
= motion_y
& 1;
1453 if (fragment
->coding_method
== MODE_USING_GOLDEN
||
1454 fragment
->coding_method
== MODE_GOLDEN_MV
)
1455 ref_frame
= &s
->golden_frame
;
1457 ref_frame
= &s
->last_frame
;
1459 ref_row
= y
+ (motion_y
>> 1);
1460 ref_row
= FFMAX(FFABS(ref_row
), ref_row
+ 8 + border
);
1462 ff_thread_await_progress(ref_frame
, ref_row
, 0);
1466 * Perform the final rendering for a particular slice of data.
1467 * The slice number ranges from 0..(c_superblock_height - 1).
1469 static void render_slice(Vp3DecodeContext
*s
, int slice
)
1471 int x
, y
, i
, j
, fragment
;
1472 int16_t *block
= s
->block
;
1473 int motion_x
= 0xdeadbeef, motion_y
= 0xdeadbeef;
1474 int motion_halfpel_index
;
1475 uint8_t *motion_source
;
1476 int plane
, first_pixel
;
1478 if (slice
>= s
->c_superblock_height
)
1481 for (plane
= 0; plane
< 3; plane
++) {
1482 uint8_t *output_plane
= s
->current_frame
.f
->data
[plane
] +
1483 s
->data_offset
[plane
];
1484 uint8_t *last_plane
= s
->last_frame
.f
->data
[plane
] +
1485 s
->data_offset
[plane
];
1486 uint8_t *golden_plane
= s
->golden_frame
.f
->data
[plane
] +
1487 s
->data_offset
[plane
];
1488 ptrdiff_t stride
= s
->current_frame
.f
->linesize
[plane
];
1489 int plane_width
= s
->width
>> (plane
&& s
->chroma_x_shift
);
1490 int plane_height
= s
->height
>> (plane
&& s
->chroma_y_shift
);
1491 int8_t(*motion_val
)[2] = s
->motion_val
[!!plane
];
1493 int sb_x
, sb_y
= slice
<< (!plane
&& s
->chroma_y_shift
);
1494 int slice_height
= sb_y
+ 1 + (!plane
&& s
->chroma_y_shift
);
1495 int slice_width
= plane
? s
->c_superblock_width
1496 : s
->y_superblock_width
;
1498 int fragment_width
= s
->fragment_width
[!!plane
];
1499 int fragment_height
= s
->fragment_height
[!!plane
];
1500 int fragment_start
= s
->fragment_start
[plane
];
1502 int do_await
= !plane
&& HAVE_THREADS
&&
1503 (s
->avctx
->active_thread_type
& FF_THREAD_FRAME
);
1505 if (!s
->flipped_image
)
1507 if (CONFIG_GRAY
&& plane
&& (s
->avctx
->flags
& AV_CODEC_FLAG_GRAY
))
1510 /* for each superblock row in the slice (both of them)... */
1511 for (; sb_y
< slice_height
; sb_y
++) {
1512 /* for each superblock in a row... */
1513 for (sb_x
= 0; sb_x
< slice_width
; sb_x
++) {
1514 /* for each block in a superblock... */
1515 for (j
= 0; j
< 16; j
++) {
1516 x
= 4 * sb_x
+ hilbert_offset
[j
][0];
1517 y
= 4 * sb_y
+ hilbert_offset
[j
][1];
1518 fragment
= y
* fragment_width
+ x
;
1520 i
= fragment_start
+ fragment
;
1523 if (x
>= fragment_width
|| y
>= fragment_height
)
1526 first_pixel
= 8 * y
* stride
+ 8 * x
;
1529 s
->all_fragments
[i
].coding_method
!= MODE_INTRA
)
1530 await_reference_row(s
, &s
->all_fragments
[i
],
1531 motion_val
[fragment
][1],
1532 (16 * y
) >> s
->chroma_y_shift
);
1534 /* transform if this block was coded */
1535 if (s
->all_fragments
[i
].coding_method
!= MODE_COPY
) {
1536 if ((s
->all_fragments
[i
].coding_method
== MODE_USING_GOLDEN
) ||
1537 (s
->all_fragments
[i
].coding_method
== MODE_GOLDEN_MV
))
1538 motion_source
= golden_plane
;
1540 motion_source
= last_plane
;
1542 motion_source
+= first_pixel
;
1543 motion_halfpel_index
= 0;
1545 /* sort out the motion vector if this fragment is coded
1546 * using a motion vector method */
1547 if ((s
->all_fragments
[i
].coding_method
> MODE_INTRA
) &&
1548 (s
->all_fragments
[i
].coding_method
!= MODE_USING_GOLDEN
)) {
1550 motion_x
= motion_val
[fragment
][0];
1551 motion_y
= motion_val
[fragment
][1];
1553 src_x
= (motion_x
>> 1) + 8 * x
;
1554 src_y
= (motion_y
>> 1) + 8 * y
;
                            motion_halfpel_index = motion_x & 0x01;
                            motion_source       += (motion_x >> 1);

                            motion_halfpel_index |= (motion_y & 0x01) << 1;
                            motion_source        += ((motion_y >> 1) * stride);
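                            /* motion_halfpel_index: bit 0 = horizontal half-pel,
                             * bit 1 = vertical half-pel. Indices 0-2 select a
                             * put_no_rnd_pixels_tab entry below, while index 3
                             * (diagonal) goes through vp3dsp.put_no_rnd_pixels_l2;
                             * the full-pel part of the vector has already been
                             * folded into motion_source. */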
1562 if (src_x
< 0 || src_y
< 0 ||
1563 src_x
+ 9 >= plane_width
||
1564 src_y
+ 9 >= plane_height
) {
1565 uint8_t *temp
= s
->edge_emu_buffer
;
1569 s
->vdsp
.emulated_edge_mc(temp
, motion_source
,
1574 motion_source
= temp
;
1578 /* first, take care of copying a block from either the
1579 * previous or the golden frame */
1580 if (s
->all_fragments
[i
].coding_method
!= MODE_INTRA
) {
1581 /* Note, it is possible to implement all MC cases
1582 * with put_no_rnd_pixels_l2 which would look more
1583 * like the VP3 source but this would be slower as
1584 * put_no_rnd_pixels_tab is better optimized */
1585 if (motion_halfpel_index
!= 3) {
1586 s
->hdsp
.put_no_rnd_pixels_tab
[1][motion_halfpel_index
](
1587 output_plane
+ first_pixel
,
1588 motion_source
, stride
, 8);
1590 /* d is 0 if motion_x and _y have the same sign,
1592 int d
= (motion_x
^ motion_y
) >> 31;
1593 s
->vp3dsp
.put_no_rnd_pixels_l2(output_plane
+ first_pixel
,
1595 motion_source
+ stride
+ 1 + d
,
1600 /* invert DCT and place (or add) in final output */
1602 if (s
->all_fragments
[i
].coding_method
== MODE_INTRA
) {
1604 index
= vp3_dequant(s
, s
->all_fragments
+ i
,
1608 s
->vp3dsp
.idct_put(output_plane
+ first_pixel
,
1612 int index
= vp3_dequant(s
, s
->all_fragments
+ i
,
1617 s
->vp3dsp
.idct_add(output_plane
+ first_pixel
,
1621 s
->vp3dsp
.idct_dc_add(output_plane
+ first_pixel
,
1626 /* copy directly from the previous frame */
1627 s
->hdsp
.put_pixels_tab
[1][0](
1628 output_plane
+ first_pixel
,
1629 last_plane
+ first_pixel
,
1635 // Filter up to the last row in the superblock row
1636 if (!s
->skip_loop_filter
)
1637 apply_loop_filter(s
, plane
, 4 * sb_y
- !!sb_y
,
1638 FFMIN(4 * sb_y
+ 3, fragment_height
- 1));
1642 /* this looks like a good place for slice dispatch... */
1644 * if (slice == s->macroblock_height - 1)
1645 * dispatch (both last slice & 2nd-to-last slice);
1646 * else if (slice > 0)
1647 * dispatch (slice - 1);
1650 vp3_draw_horiz_band(s
, FFMIN((32 << s
->chroma_y_shift
) * (slice
+ 1) - 16,
1654 /// Allocate tables for per-frame data in Vp3DecodeContext
1655 static av_cold
int allocate_tables(AVCodecContext
*avctx
)
1657 Vp3DecodeContext
*s
= avctx
->priv_data
;
1658 int y_fragment_count
, c_fragment_count
;
1660 y_fragment_count
= s
->fragment_width
[0] * s
->fragment_height
[0];
1661 c_fragment_count
= s
->fragment_width
[1] * s
->fragment_height
[1];
1663 s
->superblock_coding
= av_malloc(s
->superblock_count
);
1664 s
->all_fragments
= av_malloc(s
->fragment_count
* sizeof(Vp3Fragment
));
1666 s
->coded_fragment_list
[0] = av_malloc(s
->fragment_count
* sizeof(int));
1668 s
->dct_tokens_base
= av_malloc(64 * s
->fragment_count
*
1669 sizeof(*s
->dct_tokens_base
));
1670 s
->motion_val
[0] = av_malloc(y_fragment_count
* sizeof(*s
->motion_val
[0]));
1671 s
->motion_val
[1] = av_malloc(c_fragment_count
* sizeof(*s
->motion_val
[1]));
1673 /* work out the block mapping tables */
1674 s
->superblock_fragments
= av_malloc(s
->superblock_count
* 16 * sizeof(int));
1675 s
->macroblock_coding
= av_malloc(s
->macroblock_count
+ 1);
1677 if (!s
->superblock_coding
|| !s
->all_fragments
||
1678 !s
->dct_tokens_base
|| !s
->coded_fragment_list
[0] ||
1679 !s
->superblock_fragments
|| !s
->macroblock_coding
||
1680 !s
->motion_val
[0] || !s
->motion_val
[1]) {
1681 vp3_decode_end(avctx
);
1685 init_block_mapping(s
);
1690 static av_cold
int init_frames(Vp3DecodeContext
*s
)
1692 s
->current_frame
.f
= av_frame_alloc();
1693 s
->last_frame
.f
= av_frame_alloc();
1694 s
->golden_frame
.f
= av_frame_alloc();
1696 if (!s
->current_frame
.f
|| !s
->last_frame
.f
|| !s
->golden_frame
.f
) {
1697 av_frame_free(&s
->current_frame
.f
);
1698 av_frame_free(&s
->last_frame
.f
);
1699 av_frame_free(&s
->golden_frame
.f
);
1700 return AVERROR(ENOMEM
);
1706 static av_cold
int vp3_decode_init(AVCodecContext
*avctx
)
1708 Vp3DecodeContext
*s
= avctx
->priv_data
;
1709 int i
, inter
, plane
, ret
;
1712 int y_fragment_count
, c_fragment_count
;
1714 ret
= init_frames(s
);
1718 avctx
->internal
->allocate_progress
= 1;
1720 if (avctx
->codec_tag
== MKTAG('V', 'P', '3', '0'))
1726 s
->width
= FFALIGN(avctx
->coded_width
, 16);
1727 s
->height
= FFALIGN(avctx
->coded_height
, 16);
1728 if (avctx
->pix_fmt
== AV_PIX_FMT_NONE
)
1729 avctx
->pix_fmt
= AV_PIX_FMT_YUV420P
;
1730 avctx
->chroma_sample_location
= AVCHROMA_LOC_CENTER
;
1731 ff_hpeldsp_init(&s
->hdsp
, avctx
->flags
| AV_CODEC_FLAG_BITEXACT
);
1732 ff_videodsp_init(&s
->vdsp
, 8);
1733 ff_vp3dsp_init(&s
->vp3dsp
, avctx
->flags
);
1735 for (i
= 0; i
< 64; i
++) {
1736 #define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3)
1737 s
->idct_permutation
[i
] = TRANSPOSE(i
);
1738 s
->idct_scantable
[i
] = TRANSPOSE(ff_zigzag_direct
[i
]);
1742 /* initialize to an impossible value which will force a recalculation
1743 * in the first frame decode */
1744 for (i
= 0; i
< 3; i
++)
1747 av_pix_fmt_get_chroma_sub_sample(avctx
->pix_fmt
, &s
->chroma_x_shift
,
1748 &s
->chroma_y_shift
);
1750 s
->y_superblock_width
= (s
->width
+ 31) / 32;
1751 s
->y_superblock_height
= (s
->height
+ 31) / 32;
1752 s
->y_superblock_count
= s
->y_superblock_width
* s
->y_superblock_height
;
1754 /* work out the dimensions for the C planes */
1755 c_width
= s
->width
>> s
->chroma_x_shift
;
1756 c_height
= s
->height
>> s
->chroma_y_shift
;
1757 s
->c_superblock_width
= (c_width
+ 31) / 32;
1758 s
->c_superblock_height
= (c_height
+ 31) / 32;
1759 s
->c_superblock_count
= s
->c_superblock_width
* s
->c_superblock_height
;
1761 s
->superblock_count
= s
->y_superblock_count
+ (s
->c_superblock_count
* 2);
1762 s
->u_superblock_start
= s
->y_superblock_count
;
1763 s
->v_superblock_start
= s
->u_superblock_start
+ s
->c_superblock_count
;
1765 s
->macroblock_width
= (s
->width
+ 15) / 16;
1766 s
->macroblock_height
= (s
->height
+ 15) / 16;
1767 s
->macroblock_count
= s
->macroblock_width
* s
->macroblock_height
;
1769 s
->fragment_width
[0] = s
->width
/ FRAGMENT_PIXELS
;
1770 s
->fragment_height
[0] = s
->height
/ FRAGMENT_PIXELS
;
1771 s
->fragment_width
[1] = s
->fragment_width
[0] >> s
->chroma_x_shift
;
1772 s
->fragment_height
[1] = s
->fragment_height
[0] >> s
->chroma_y_shift
;
    /* fragment count covers all 8x8 blocks for all 3 planes */
    y_fragment_count     = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count     = s->fragment_width[1] * s->fragment_height[1];
    s->fragment_count    = y_fragment_count + 2 * c_fragment_count;
    s->fragment_start[1] = y_fragment_count;
    s->fragment_start[2] = y_fragment_count + c_fragment_count;
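
    /* Worked example (hypothetical 320x240 4:2:0 stream): the luma plane has
     * 40x30 fragments (1200) and each chroma plane 20x15 (300), so
     * fragment_count = 1800 with the chroma planes starting at fragment
     * indices 1200 and 1500; superblocks come out as 10x8 for luma and 5x4
     * per chroma plane. */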
1781 if (!s
->theora_tables
) {
1782 for (i
= 0; i
< 64; i
++) {
1783 s
->coded_dc_scale_factor
[i
] = vp31_dc_scale_factor
[i
];
1784 s
->coded_ac_scale_factor
[i
] = vp31_ac_scale_factor
[i
];
1785 s
->base_matrix
[0][i
] = vp31_intra_y_dequant
[i
];
1786 s
->base_matrix
[1][i
] = vp31_intra_c_dequant
[i
];
1787 s
->base_matrix
[2][i
] = vp31_inter_dequant
[i
];
1788 s
->filter_limit_values
[i
] = vp31_filter_limit_values
[i
];
1791 for (inter
= 0; inter
< 2; inter
++) {
1792 for (plane
= 0; plane
< 3; plane
++) {
1793 s
->qr_count
[inter
][plane
] = 1;
1794 s
->qr_size
[inter
][plane
][0] = 63;
1795 s
->qr_base
[inter
][plane
][0] =
1796 s
->qr_base
[inter
][plane
][1] = 2 * inter
+ (!!plane
) * !inter
;
1800 /* init VLC tables */
1801 for (i
= 0; i
< 16; i
++) {
1803 init_vlc(&s
->dc_vlc
[i
], 11, 32,
1804 &dc_bias
[i
][0][1], 4, 2,
1805 &dc_bias
[i
][0][0], 4, 2, 0);
1807 /* group 1 AC histograms */
1808 init_vlc(&s
->ac_vlc_1
[i
], 11, 32,
1809 &ac_bias_0
[i
][0][1], 4, 2,
1810 &ac_bias_0
[i
][0][0], 4, 2, 0);
1812 /* group 2 AC histograms */
1813 init_vlc(&s
->ac_vlc_2
[i
], 11, 32,
1814 &ac_bias_1
[i
][0][1], 4, 2,
1815 &ac_bias_1
[i
][0][0], 4, 2, 0);
1817 /* group 3 AC histograms */
1818 init_vlc(&s
->ac_vlc_3
[i
], 11, 32,
1819 &ac_bias_2
[i
][0][1], 4, 2,
1820 &ac_bias_2
[i
][0][0], 4, 2, 0);
1822 /* group 4 AC histograms */
1823 init_vlc(&s
->ac_vlc_4
[i
], 11, 32,
1824 &ac_bias_3
[i
][0][1], 4, 2,
1825 &ac_bias_3
[i
][0][0], 4, 2, 0);
1828 for (i
= 0; i
< 16; i
++) {
1830 if (init_vlc(&s
->dc_vlc
[i
], 11, 32,
1831 &s
->huffman_table
[i
][0][1], 8, 4,
1832 &s
->huffman_table
[i
][0][0], 8, 4, 0) < 0)
1835 /* group 1 AC histograms */
1836 if (init_vlc(&s
->ac_vlc_1
[i
], 11, 32,
1837 &s
->huffman_table
[i
+ 16][0][1], 8, 4,
1838 &s
->huffman_table
[i
+ 16][0][0], 8, 4, 0) < 0)
1841 /* group 2 AC histograms */
1842 if (init_vlc(&s
->ac_vlc_2
[i
], 11, 32,
1843 &s
->huffman_table
[i
+ 16 * 2][0][1], 8, 4,
1844 &s
->huffman_table
[i
+ 16 * 2][0][0], 8, 4, 0) < 0)
1847 /* group 3 AC histograms */
1848 if (init_vlc(&s
->ac_vlc_3
[i
], 11, 32,
1849 &s
->huffman_table
[i
+ 16 * 3][0][1], 8, 4,
1850 &s
->huffman_table
[i
+ 16 * 3][0][0], 8, 4, 0) < 0)
1853 /* group 4 AC histograms */
1854 if (init_vlc(&s
->ac_vlc_4
[i
], 11, 32,
1855 &s
->huffman_table
[i
+ 16 * 4][0][1], 8, 4,
1856 &s
->huffman_table
[i
+ 16 * 4][0][0], 8, 4, 0) < 0)
1861 init_vlc(&s
->superblock_run_length_vlc
, 6, 34,
1862 &superblock_run_length_vlc_table
[0][1], 4, 2,
1863 &superblock_run_length_vlc_table
[0][0], 4, 2, 0);
1865 init_vlc(&s
->fragment_run_length_vlc
, 5, 30,
1866 &fragment_run_length_vlc_table
[0][1], 4, 2,
1867 &fragment_run_length_vlc_table
[0][0], 4, 2, 0);
1869 init_vlc(&s
->mode_code_vlc
, 3, 8,
1870 &mode_code_vlc_table
[0][1], 2, 1,
1871 &mode_code_vlc_table
[0][0], 2, 1, 0);
1873 init_vlc(&s
->motion_vector_vlc
, 6, 63,
1874 &motion_vector_vlc_table
[0][1], 2, 1,
1875 &motion_vector_vlc_table
[0][0], 2, 1, 0);
1877 return allocate_tables(avctx
);
1880 av_log(avctx
, AV_LOG_FATAL
, "Invalid huffman table\n");
1884 /// Release and shuffle frames after decode finishes
1885 static int update_frames(AVCodecContext
*avctx
)
1887 Vp3DecodeContext
*s
= avctx
->priv_data
;
1890 /* shuffle frames (last = current) */
1891 ff_thread_release_buffer(avctx
, &s
->last_frame
);
1892 ret
= ff_thread_ref_frame(&s
->last_frame
, &s
->current_frame
);
1897 ff_thread_release_buffer(avctx
, &s
->golden_frame
);
1898 ret
= ff_thread_ref_frame(&s
->golden_frame
, &s
->current_frame
);
1902 ff_thread_release_buffer(avctx
, &s
->current_frame
);
1906 static int ref_frame(Vp3DecodeContext
*s
, ThreadFrame
*dst
, ThreadFrame
*src
)
1908 ff_thread_release_buffer(s
->avctx
, dst
);
1909 if (src
->f
->data
[0])
1910 return ff_thread_ref_frame(dst
, src
);
1914 static int ref_frames(Vp3DecodeContext
*dst
, Vp3DecodeContext
*src
)
1917 if ((ret
= ref_frame(dst
, &dst
->current_frame
, &src
->current_frame
)) < 0 ||
1918 (ret
= ref_frame(dst
, &dst
->golden_frame
, &src
->golden_frame
)) < 0 ||
1919 (ret
= ref_frame(dst
, &dst
->last_frame
, &src
->last_frame
)) < 0)
1924 static int vp3_update_thread_context(AVCodecContext
*dst
, const AVCodecContext
*src
)
1926 Vp3DecodeContext
*s
= dst
->priv_data
, *s1
= src
->priv_data
;
1927 int qps_changed
= 0, i
, err
;
1929 #define copy_fields(to, from, start_field, end_field) \
1930 memcpy(&to->start_field, &from->start_field, \
1931 (char *) &to->end_field - (char *) &to->start_field)
1933 if (!s1
->current_frame
.f
->data
[0] ||
1934 s
->width
!= s1
->width
|| s
->height
!= s1
->height
) {
1941 // init tables if the first frame hasn't been decoded
1942 if (!s
->current_frame
.f
->data
[0]) {
1943 int y_fragment_count
, c_fragment_count
;
1945 err
= allocate_tables(dst
);
1948 y_fragment_count
= s
->fragment_width
[0] * s
->fragment_height
[0];
1949 c_fragment_count
= s
->fragment_width
[1] * s
->fragment_height
[1];
1950 memcpy(s
->motion_val
[0], s1
->motion_val
[0],
1951 y_fragment_count
* sizeof(*s
->motion_val
[0]));
1952 memcpy(s
->motion_val
[1], s1
->motion_val
[1],
1953 c_fragment_count
* sizeof(*s
->motion_val
[1]));
1956 // copy previous frame data
1957 if ((err
= ref_frames(s
, s1
)) < 0)
1960 s
->keyframe
= s1
->keyframe
;
1962 // copy qscale data if necessary
1963 for (i
= 0; i
< 3; i
++) {
1964 if (s
->qps
[i
] != s1
->qps
[1]) {
1966 memcpy(&s
->qmat
[i
], &s1
->qmat
[i
], sizeof(s
->qmat
[i
]));
1970 if (s
->qps
[0] != s1
->qps
[0])
1971 memcpy(&s
->bounding_values_array
, &s1
->bounding_values_array
,
1972 sizeof(s
->bounding_values_array
));
1975 copy_fields(s
, s1
, qps
, superblock_count
);
1979 return update_frames(dst
);
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int i, ret;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb)) {
        av_log(avctx, AV_LOG_ERROR,
               "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

    s->nqps = 0;
    do {
        s->qps[s->nqps++] = get_bits(&gb, 6);
    } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
               s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);

    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
                          avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
                                                                  : AVDISCARD_NONKEY);

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
                                                : AV_PICTURE_TYPE_P;
    if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));

    if (s->keyframe) {
        if (!s->theora) {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */

            s->version = get_bits(&gb, 5);
            if (avctx->frame_number == 0)
                av_log(s->avctx, AV_LOG_DEBUG,
                       "VP version: %d\n", s->version);
        }

        if (s->version || s->theora) {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR,
                       "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
        if (!s->golden_frame.f->data[0]) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "vp3: first frame not a keyframe\n");

            s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
            if (ff_thread_get_buffer(avctx, &s->golden_frame,
                                     AV_GET_BUFFER_FLAG_REF) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            ff_thread_release_buffer(avctx, &s->last_frame);
            if ((ret = ff_thread_ref_frame(&s->last_frame,
                                           &s->golden_frame)) < 0)
                goto error;
            ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
        }
    }

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
    ff_thread_finish_setup(avctx);

    if (unpack_superblocks(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row + 1);
    }
    vp3_draw_horiz_band(s, s->height);

    /* output frame, offset as needed */
    if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
        return ret;

    frame->crop_left   = s->offset_x;
    frame->crop_right  = avctx->coded_width - avctx->width - s->offset_x;
    frame->crop_top    = s->offset_y;
    frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;

    *got_frame = 1;

    if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
        ret = update_frames(avctx);
        if (ret < 0)
            return ret;
    }

    return buf_size;

error:
    ff_thread_report_progress(&s->current_frame, INT_MAX, 0);

    if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
        av_frame_unref(s->current_frame.f);

    return -1;
}
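
/* Recursively reads one Huffman tree from the Theora setup header: a 1 bit
 * stores the current code (s->hbits, s->huff_code_size bits long) as the entry
 * for a 5-bit token, while a 0 bit descends into both children, extending the
 * code by one bit per branch. */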
static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        ff_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
                s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    } else {
        if (s->huff_code_size >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}
static int vp3_init_thread_copy(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    s->superblock_coding      = NULL;
    s->all_fragments          = NULL;
    s->coded_fragment_list[0] = NULL;
    s->dct_tokens_base        = NULL;
    s->superblock_fragments   = NULL;
    s->macroblock_coding      = NULL;
    s->motion_val[0]          = NULL;
    s->motion_val[1]          = NULL;
    s->edge_emu_buffer        = NULL;

    return init_frames(s);
}
#if CONFIG_THEORA_DECODER
static const enum AVPixelFormat theora_pix_fmts[4] = {
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P
};

static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    uint8_t offset_x = 0, offset_y = 0;
    int ret;
    AVRational fps, aspect;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
     * but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200) {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG,
               "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  =
    s->width       = get_bits(gb, 16) << 4;
    visible_height =
    s->height      = get_bits(gb, 16) << 4;

    if (s->theora >= 0x030200) {
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
        visible_width  + offset_x > s->width ||
        visible_height + offset_y > s->height) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
               visible_width, visible_height, offset_x, offset_y,
               s->width, s->height);
        return AVERROR_INVALIDDATA;
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        if (fps.num < 0 || fps.den < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
            return AVERROR_INVALIDDATA;
        }
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  fps.den, fps.num, 1 << 30);
    }

    aspect.num = get_bits_long(gb, 24);
    aspect.den = get_bits_long(gb, 24);
    if (aspect.num && aspect.den) {
        av_reduce(&avctx->sample_aspect_ratio.num,
                  &avctx->sample_aspect_ratio.den,
                  aspect.num, aspect.den, 1 << 30);
        ff_set_sar(avctx, avctx->sample_aspect_ratio);
    }

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200) {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

    ret = ff_set_dimensions(avctx, s->width, s->height);
    if (ret < 0)
        return ret;
    if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) &&
        (visible_width != s->width || visible_height != s->height)) {
        avctx->width  = visible_width;
        avctx->height = visible_height;
        // translate offsets from theora axis ([0,0] lower left)
        // to normal axis ([0,0] upper left)
        s->offset_x = offset_x;
        s->offset_y = s->height - visible_height - offset_y;
    }

    if (colorspace == 1)
        avctx->color_primaries = AVCOL_PRI_BT470M;
    else if (colorspace == 2)
        avctx->color_primaries = AVCOL_PRI_BT470BG;

    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        if (n)
            for (i = 0; i < 64; i++)
                s->filter_limit_values[i] = get_bits(gb, n);
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if (matrices > 384) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
        return -1;
    }

    for (n = 0; n < matrices; n++)
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i] = get_bits(gb, 8);

    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr = 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                int qtj, plj;
                if (inter && get_bits1(gb)) {
                    qtj = 0;
                    plj = plane;
                } else {
                    qtj = (3 * inter + plane - 1) / 3;
                    plj = (plane + 2) % 3;
                }
                s->qr_count[inter][plane] = s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
                       sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
                       sizeof(s->qr_base[0][0]));
            } else {
                int qri = 0;
                int qi  = 0;

                for (;;) {
                    i = get_bits(gb, av_log2(matrices - 1) + 1);
                    if (i >= matrices) {
                        av_log(avctx, AV_LOG_ERROR,
                               "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri] = i;
                    if (qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
                    s->qr_size[inter][plane][qri++] = i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane] = qri;
            }
        }
    }

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries        = 0;
        s->huff_code_size = 1;
        if (!get_bits1(gb)) {
            s->hbits = 0;
            if (read_huffman_tree(avctx, gb))
                return -1;
            s->hbits = 1;
            if (read_huffman_tree(avctx, gb))
                return -1;
        }
    }

    s->theora_tables = 1;

    return 0;
}
static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (!avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                                  42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for (i = 0; i < 3; i++) {
        if (header_len[i] <= 0)
            continue;

        init_get_bits(&gb, header_start[i], header_len[i] * 8);

        ptype = get_bits(&gb, 8);

        if (!(ptype & 0x80)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//            return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6 * 8); /* "theora" */

        switch (ptype) {
        case 0x80:
            theora_decode_header(avctx, &gb);
            break;
        case 0x81:
            // FIXME: is this needed? it breaks sometimes
            // theora_decode_comments(avctx, gb);
            break;
        case 0x82:
            if (theora_decode_tables(avctx, &gb))
                return -1;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "Unknown Theora config packet: %d\n", ptype & ~0x80);
            break;
        }
        if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING,
                   "%d bits left in packet %X\n",
                   8 * header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}
AVCodec ff_theora_decoder = {
    .name                  = "theora",
    .long_name             = NULL_IF_CONFIG_SMALL("Theora"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_THEORA,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = theora_decode_init,
    .close                 = vp3_decode_end,
    .decode                = vp3_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
                             AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = vp3_decode_flush,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_EXPORTS_CROPPING,
};
#endif

AVCodec ff_vp3_decoder = {
    .name                  = "vp3",
    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP3"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_VP3,
    .priv_data_size        = sizeof(Vp3DecodeContext),
    .init                  = vp3_decode_init,
    .close                 = vp3_decode_end,
    .decode                = vp3_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
                             AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = vp3_decode_flush,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
};