/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder
 */

#include "config.h"

#include "libavutil/imgutils.h"
#include "libavutil/thread.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodec.h"
#include "golomb.h"

#include "rv34.h"
#include "rv40vlc2.h"
#include "rv40data.h"
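
/* All RV40 VLC tables are built once at runtime into the static buffer below;
 * the pointer arrays reference sub-tables that rv40_init_tables() carves out of it. */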
static VLCElem aic_top_vlc[23590];
static const VLCElem *aic_mode1_vlc[AIC_MODE1_NUM], *aic_mode2_vlc[AIC_MODE2_NUM];
static const VLCElem *ptype_vlc[NUM_PTYPE_VLCS], *btype_vlc[NUM_BTYPE_VLCS];

static av_cold const VLCElem *rv40_init_table(VLCInitState *state, int nb_bits,
                                              int nb_codes, const uint8_t (*tab)[2])
{
    return ff_vlc_init_tables_from_lengths(state, nb_bits, nb_codes,
                                           &tab[0][1], 2, &tab[0][0], 2, 1,
                                           0, 0);
}

/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    VLCInitState state = VLC_INIT_STATE(aic_top_vlc);
    int i;

    rv40_init_table(&state, AIC_TOP_BITS, AIC_TOP_SIZE,
                    rv40_aic_top_vlc_tab);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        aic_mode1_vlc[i] =
            rv40_init_table(&state, AIC_MODE1_BITS,
                            AIC_MODE1_SIZE, aic_mode1_vlc_tabs[i]);
    }
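    /* Each AIC mode 2 symbol packs two 4-bit intra modes; the pair is stored
     * in native byte order so both can be written at once with AV_WN16(). */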
    for (unsigned i = 0; i < AIC_MODE2_NUM; i++){
        uint16_t syms[AIC_MODE2_SIZE];

        for (int j = 0; j < AIC_MODE2_SIZE; j++) {
            int first  = aic_mode2_vlc_syms[i][j] >> 4;
            int second = aic_mode2_vlc_syms[i][j] & 0xF;
            if (HAVE_BIGENDIAN)
                syms[j] = (first << 8) | second;
            else
                syms[j] = first | (second << 8);
        }
        aic_mode2_vlc[i] =
            ff_vlc_init_tables_from_lengths(&state, AIC_MODE2_BITS, AIC_MODE2_SIZE,
                                            aic_mode2_vlc_bits[i], 1,
                                            syms, 2, 2, 0, 0);
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++){
        ptype_vlc[i] =
            rv40_init_table(&state, PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                            ptype_vlc_tabs[i]);
    }
    for(i = 0; i < NUM_BTYPE_VLCS; i++){
        btype_vlc[i] =
            rv40_init_table(&state, BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                            btype_vlc_tabs[i]);
    }
}

/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
    if(val < 0)
        val = dim[get_bits1(gb) - val];
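    /* a zero table entry means the dimension is coded explicitly, in units of
     * four pixels: bytes are accumulated while the escape value 0xFF is read */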
    if(!val){
        do{
            if (get_bits_left(gb) < 8)
                return AVERROR_INVALIDDATA;
            t = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;
    int ret;

    memset(si, 0, sizeof(SliceInfo));
    if(get_bits1(gb))
        return AVERROR_INVALIDDATA;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return AVERROR_INVALIDDATA;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
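    /* intra slices always carry the coded picture size; for inter slices a
     * flag signals that the previously decoded dimensions are reused */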
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if ((ret = av_image_check_size(w, h, 0, r->s.avctx)) < 0)
        return ret;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

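    /* Walk the 4x4 grid of luma subblocks one row at a time; on the first
     * line of a slice the top row has no decoded neighbours and is read from
     * the dedicated AIC "top" VLC, four modes per codeword. */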
    for(i = 0; i < 4; i++, dst += r->intra_types_stride){
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using VLC chosen by the prediction pattern
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left.
             */
            A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-r->intra_types_stride];
            C = ptr[-1];
            pattern = A + B * (1 << 4) + C * (1 << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                AV_WN16(ptr, get_vlc2(gb, aic_mode2_vlc[k], AIC_MODE2_BITS, 2));
                ptr += 2;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10], AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}

/**
 * Decode macroblock information.
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }

    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;

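    /* The macroblock type VLC is selected from the most frequent type among
     * the already decoded neighbours (left, top, top-right, top-left). */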
    if(r->avail_cache[6-4]){
        int blocks[RV34_MB_TYPES] = {0};
        int count = 0;
        if(r->avail_cache[6-1])
            blocks[r->mb_type[mb_pos - 1]]++;
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[6-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[6-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
        for(i = 0; i < RV34_MB_TYPES; i++){
            if(blocks[i] > count){
                count = blocks[i];
                prev_type = i;
                if(count>1)
                    break;
            }
        }
    } else if (r->avail_cache[6-1])
        prev_type = r->mb_type[mb_pos - 1];

    if(s->pict_type == AV_PICTURE_TYPE_P){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type], PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type], PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type], BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type], BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};

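/* Each bit in the masks below corresponds to one 4x4 subblock: a luma
 * macroblock uses 16 bits (four per row, LSB = top-left), a chroma plane
 * uses four bits (two per row). */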
#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };
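
/**
 * Apply the RV40 in-loop deblocking filter to one 4-pixel edge segment,
 * choosing between the strong and the weak filter based on the measured
 * edge strength. lim_p1/lim_q1 are the clipping limits for the two sides,
 * dir selects the filtering direction and edge marks a macroblock edge.
 */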
static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
                                      uint8_t *src, int stride, int dmode,
                                      int lim_q1, int lim_p1,
                                      int alpha, int beta, int beta2,
                                      int chroma, int edge, int dir)
{
    int filter_p1, filter_q1;
    int strong;
    int lims;

    strong = rdsp->rv40_loop_filter_strength[dir](src, stride, beta, beta2,
                                                  edge, &filter_p1, &filter_q1);

    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;

    if (strong) {
        rdsp->rv40_strong_loop_filter[dir](src, stride, alpha,
                                           lims, dmode, chroma);
    } else if (filter_p1 & filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, 1, 1, alpha, beta,
                                         lims, lim_q1, lim_p1);
    } else if (filter_p1 | filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, filter_p1, filter_q1,
                                         alpha, beta, lims >> 1, lim_q1 >> 1,
                                         lim_p1 >> 1);
    }
}

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< current macroblock and its neighbours types
    /**
     * flags indicating that macroblock can be filtered with strong filter
     * it is set only for intra coded MB and MB with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for luma part of current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for chroma part of current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of
     * 8x8 block with different enough motion vectors
     */
    unsigned mvmasks[4];
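
    /* First pass: intra macroblocks and macroblocks with separately coded DCs
     * are marked as fully coded so that all of their subblock edges are filtered. */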
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->cur_pic.mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
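
    /* Second pass: build per-edge filtering patterns for each macroblock from
     * its own and its neighbours' coded block patterns and motion-vector
     * masks, then filter the luma and chroma edges. */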
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        unsigned y_to_deblock;
        int c_to_deblock[2];

        q = s->cur_pic.qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;

        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->cur_pic.mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                    <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP]  & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                    <<  1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i] << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->cur_pic.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_adaptive_loop_filter(&r->rdsp, Y+4*s->linesize,
                                              s->linesize, dither,
                                              y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                              clip_cur, alpha, beta, betaY,
                                              0, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 0, 1);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                              alpha, beta, betaY, 0, 1, 0);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 1, 1);
                }
            }
        }
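        // chroma is filtered the same way on a 2x2 grid of 4x4 subblocks per plane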
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->cur_pic.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C+4*s->uvlinesize, s->uvlinesize, i*8,
                                                  clip_bot,
                                                  clip_cur,
                                                  alpha, beta, betaC, 1, 0, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k] & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 0, 1);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, i*8,
                                                  clip_cur,
                                                  clip_top,
                                                  alpha, beta, betaC, 1, 1, 0);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 1, 1);
                    }
                }
            }
        }
    }
}

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    RV34DecContext *r = avctx->priv_data;
    int ret;

    r->rv30 = 0;
    if ((ret = ff_rv34_decode_init(avctx)) < 0)
        return ret;
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    ff_rv40dsp_init(&r->rdsp);
    ff_thread_once(&init_static_once, rv40_init_tables);
    return 0;
}

const FFCodec ff_rv40_decoder = {
    .p.name         = "rv40",
    CODEC_LONG_NAME("RealVideo 4.0"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_RV40,
    .priv_data_size = sizeof(RV34DecContext),
    .init           = rv40_decode_init,
    .close          = ff_rv34_decode_end,
    FF_CODEC_DECODE_CB(ff_rv34_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                      AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .flush          = ff_mpeg_flush,
    UPDATE_THREAD_CONTEXT(ff_rv34_decode_update_thread_context),
};