Merge branch 'master' of http://repo.or.cz/r/FFMpeg-mirror
[FFMpeg-mirror/DVCPRO-HD.git] / libavcodec / mpegvideo.c
blob2ffb7a247f5a7d764616e8a5a0b47bfbc74a05dd
/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file mpegvideo.c
 * The simplest mpeg encoder (well, it was the simplest!).
 */
30 #include "avcodec.h"
31 #include "dsputil.h"
32 #include "mpegvideo.h"
33 #include "mpegvideo_common.h"
34 #include "mjpegenc.h"
35 #include "msmpeg4.h"
36 #include "faandct.h"
37 #include <limits.h>
39 //#undef NDEBUG
40 //#include <assert.h>
42 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
43 DCTELEM *block, int n, int qscale);
44 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
57 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58 extern void XVMC_field_end(MpegEncContext *s);
59 extern void XVMC_decode_mb(MpegEncContext *s);
/* enable all paranoid tests for rounding, overflows, etc... */
//#define PARANOID

//#define DEBUG
/* Identity mapping: by default the chroma qscale equals the luma qscale (0..31). */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
/* MPEG-1 uses a constant DC coefficient scale of 8 regardless of qscale. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
/**
 * Scans for an MPEG start code (0x000001xx).
 * *state carries the last 4 bytes seen across calls, so a start code split
 * over buffer boundaries is still found.
 * @return pointer one past the start code, or end if none was found
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* first check the carried-over state for a code straddling the boundary */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* scan 3 bytes at a time; p[-1] > 1 means no 00 00 01 can end here */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
112 /* init common dct for both encoder and decoder */
113 int ff_dct_common_init(MpegEncContext *s)
115 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
116 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
117 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
118 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
119 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
120 if(s->flags & CODEC_FLAG_BITEXACT)
121 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
122 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
124 #if defined(HAVE_MMX)
125 MPV_common_init_mmx(s);
126 #elif defined(ARCH_ALPHA)
127 MPV_common_init_axp(s);
128 #elif defined(CONFIG_MLIB)
129 MPV_common_init_mlib(s);
130 #elif defined(HAVE_MMI)
131 MPV_common_init_mmi(s);
132 #elif defined(ARCH_ARMV4L)
133 MPV_common_init_armv4l(s);
134 #elif defined(HAVE_ALTIVEC)
135 MPV_common_init_altivec(s);
136 #elif defined(ARCH_BFIN)
137 MPV_common_init_bfin(s);
138 #endif
140 /* load & permutate scantables
141 note: only wmv uses different ones
143 if(s->alternate_scan){
144 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
145 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
146 }else{
147 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
148 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
150 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
151 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
153 return 0;
156 void copy_picture(Picture *dst, Picture *src){
157 *dst = *src;
158 dst->type= FF_BUFFER_TYPE_COPY;
162 * allocates a Picture
163 * The pixels are allocated/set by calling get_buffer() if shared=0
165 int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
166 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
167 const int mb_array_size= s->mb_stride*s->mb_height;
168 const int b8_array_size= s->b8_stride*s->mb_height*2;
169 const int b4_array_size= s->b4_stride*s->mb_height*4;
170 int i;
171 int r= -1;
173 if(shared){
174 assert(pic->data[0]);
175 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
176 pic->type= FF_BUFFER_TYPE_SHARED;
177 }else{
178 assert(!pic->data[0]);
180 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
182 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
183 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
184 return -1;
187 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
188 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
189 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
190 return -1;
193 if(pic->linesize[1] != pic->linesize[2]){
194 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
195 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
196 return -1;
199 s->linesize = pic->linesize[0];
200 s->uvlinesize= pic->linesize[1];
203 if(pic->qscale_table==NULL){
204 if (s->encoding) {
205 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
206 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
207 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
210 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
211 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
212 CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t))
213 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
214 if(s->out_format == FMT_H264){
215 for(i=0; i<2; i++){
216 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t))
217 pic->motion_val[i]= pic->motion_val_base[i]+4;
218 CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
220 pic->motion_subsample_log2= 2;
221 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
222 for(i=0; i<2; i++){
223 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
224 pic->motion_val[i]= pic->motion_val_base[i]+4;
225 CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
227 pic->motion_subsample_log2= 3;
229 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
230 CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
232 pic->qstride= s->mb_stride;
233 CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
236 /* It might be nicer if the application would keep track of these
237 * but it would require an API change. */
238 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
239 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
240 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
241 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
243 return 0;
244 fail: //for the CHECKED_ALLOCZ macro
245 if(r>=0)
246 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
247 return -1;
251 * deallocates a picture
253 static void free_picture(MpegEncContext *s, Picture *pic){
254 int i;
256 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
257 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
260 av_freep(&pic->mb_var);
261 av_freep(&pic->mc_mb_var);
262 av_freep(&pic->mb_mean);
263 av_freep(&pic->mbskip_table);
264 av_freep(&pic->qscale_table);
265 av_freep(&pic->mb_type_base);
266 av_freep(&pic->dct_coeff);
267 av_freep(&pic->pan_scan);
268 pic->mb_type= NULL;
269 for(i=0; i<2; i++){
270 av_freep(&pic->motion_val_base[i]);
271 av_freep(&pic->ref_index[i]);
274 if(pic->type == FF_BUFFER_TYPE_SHARED){
275 for(i=0; i<4; i++){
276 pic->base[i]=
277 pic->data[i]= NULL;
279 pic->type= 0;
283 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
284 int i;
286 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
287 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
288 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
290 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
291 CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t))
292 s->rd_scratchpad= s->me.scratchpad;
293 s->b_scratchpad= s->me.scratchpad;
294 s->obmc_scratchpad= s->me.scratchpad + 16;
295 if (s->encoding) {
296 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
297 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
298 if(s->avctx->noise_reduction){
299 CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
302 CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
303 s->block= s->blocks[0];
305 for(i=0;i<12;i++){
306 s->pblocks[i] = (short *)(&s->block[i]);
308 return 0;
309 fail:
310 return -1; //free() through MPV_common_end()
313 static void free_duplicate_context(MpegEncContext *s){
314 if(s==NULL) return;
316 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
317 av_freep(&s->me.scratchpad);
318 s->rd_scratchpad=
319 s->b_scratchpad=
320 s->obmc_scratchpad= NULL;
322 av_freep(&s->dct_error_sum);
323 av_freep(&s->me.map);
324 av_freep(&s->me.score_map);
325 av_freep(&s->blocks);
326 s->block= NULL;
329 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
330 #define COPY(a) bak->a= src->a
331 COPY(allocated_edge_emu_buffer);
332 COPY(edge_emu_buffer);
333 COPY(me.scratchpad);
334 COPY(rd_scratchpad);
335 COPY(b_scratchpad);
336 COPY(obmc_scratchpad);
337 COPY(me.map);
338 COPY(me.score_map);
339 COPY(blocks);
340 COPY(block);
341 COPY(start_mb_y);
342 COPY(end_mb_y);
343 COPY(me.map_generation);
344 COPY(pb);
345 COPY(dct_error_sum);
346 COPY(dct_count[0]);
347 COPY(dct_count[1]);
348 #undef COPY
351 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
352 MpegEncContext bak;
353 int i;
354 //FIXME copy only needed parts
355 //START_TIMER
356 backup_duplicate_context(&bak, dst);
357 memcpy(dst, src, sizeof(MpegEncContext));
358 backup_duplicate_context(dst, &bak);
359 for(i=0;i<12;i++){
360 dst->pblocks[i] = (short *)(&dst->block[i]);
362 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
366 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
367 * the changed fields will not depend upon the prior state of the MpegEncContext.
369 void MPV_common_defaults(MpegEncContext *s){
370 s->y_dc_scale_table=
371 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
372 s->chroma_qscale_table= ff_default_chroma_qscale_table;
373 s->progressive_frame= 1;
374 s->progressive_sequence= 1;
375 s->picture_structure= PICT_FRAME;
377 s->coded_picture_number = 0;
378 s->picture_number = 0;
379 s->input_picture_number = 0;
381 s->picture_in_gop_number = 0;
383 s->f_code = 1;
384 s->b_code = 1;
388 * sets the given MpegEncContext to defaults for decoding.
389 * the changed fields will not depend upon the prior state of the MpegEncContext.
391 void MPV_decode_defaults(MpegEncContext *s){
392 MPV_common_defaults(s);
396 * init common structure for both encoder and decoder.
397 * this assumes that some variables like width/height are already set
399 int MPV_common_init(MpegEncContext *s)
401 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
403 s->mb_height = (s->height + 15) / 16;
405 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
406 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
407 return -1;
410 if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
411 return -1;
413 dsputil_init(&s->dsp, s->avctx);
414 ff_dct_common_init(s);
416 s->flags= s->avctx->flags;
417 s->flags2= s->avctx->flags2;
419 s->mb_width = (s->width + 15) / 16;
420 s->mb_stride = s->mb_width + 1;
421 s->b8_stride = s->mb_width*2 + 1;
422 s->b4_stride = s->mb_width*4 + 1;
423 mb_array_size= s->mb_height * s->mb_stride;
424 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
426 /* set chroma shifts */
427 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
428 &(s->chroma_y_shift) );
430 /* set default edge pos, will be overriden in decode_header if needed */
431 s->h_edge_pos= s->mb_width*16;
432 s->v_edge_pos= s->mb_height*16;
434 s->mb_num = s->mb_width * s->mb_height;
436 s->block_wrap[0]=
437 s->block_wrap[1]=
438 s->block_wrap[2]=
439 s->block_wrap[3]= s->b8_stride;
440 s->block_wrap[4]=
441 s->block_wrap[5]= s->mb_stride;
443 y_size = s->b8_stride * (2 * s->mb_height + 1);
444 c_size = s->mb_stride * (s->mb_height + 1);
445 yc_size = y_size + 2 * c_size;
447 /* convert fourcc to upper case */
448 s->codec_tag= toupper( s->avctx->codec_tag &0xFF)
449 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
450 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
451 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
453 s->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
454 + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
455 + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
456 + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
458 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
460 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
461 for(y=0; y<s->mb_height; y++){
462 for(x=0; x<s->mb_width; x++){
463 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
466 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
468 if (s->encoding) {
469 /* Allocate MV tables */
470 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
471 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
472 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
473 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
474 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
475 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
476 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
477 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
478 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
479 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
480 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
481 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
483 if(s->msmpeg4_version){
484 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
486 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
488 /* Allocate MB type table */
489 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) //needed for encoding
491 CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
493 CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
494 CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
495 CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
496 CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
497 CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
498 CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
500 if(s->avctx->noise_reduction){
501 CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
504 CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
506 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
508 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
509 /* interlaced direct mode decoding tables */
510 for(i=0; i<2; i++){
511 int j, k;
512 for(j=0; j<2; j++){
513 for(k=0; k<2; k++){
514 CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k] , mv_table_size * 2 * sizeof(int16_t))
515 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
517 CHECKED_ALLOCZ(s->b_field_select_table[i][j] , mb_array_size * 2 * sizeof(uint8_t))
518 CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j] , mv_table_size * 2 * sizeof(int16_t))
519 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
521 CHECKED_ALLOCZ(s->p_field_select_table[i] , mb_array_size * 2 * sizeof(uint8_t))
524 if (s->out_format == FMT_H263) {
525 /* ac values */
526 CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
527 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
528 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
529 s->ac_val[2] = s->ac_val[1] + c_size;
531 /* cbp values */
532 CHECKED_ALLOCZ(s->coded_block_base, y_size);
533 s->coded_block= s->coded_block_base + s->b8_stride + 1;
535 /* cbp, ac_pred, pred_dir */
536 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
537 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
540 if (s->h263_pred || s->h263_plus || !s->encoding) {
541 /* dc values */
542 //MN: we need these for error resilience of intra-frames
543 CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
544 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
545 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
546 s->dc_val[2] = s->dc_val[1] + c_size;
547 for(i=0;i<yc_size;i++)
548 s->dc_val_base[i] = 1024;
551 /* which mb is a intra block */
552 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
553 memset(s->mbintra_table, 1, mb_array_size);
555 /* init macroblock skip table */
556 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
557 //Note the +1 is for a quicker mpeg4 slice_end detection
558 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
560 s->parse_context.state= -1;
561 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
562 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
563 s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
564 s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
567 s->context_initialized = 1;
569 s->thread_context[0]= s;
570 threads = s->avctx->thread_count;
572 for(i=1; i<threads; i++){
573 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
574 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
577 for(i=0; i<threads; i++){
578 if(init_duplicate_context(s->thread_context[i], s) < 0)
579 goto fail;
580 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
581 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
584 return 0;
585 fail:
586 MPV_common_end(s);
587 return -1;
590 /* init common structure for both encoder and decoder */
591 void MPV_common_end(MpegEncContext *s)
593 int i, j, k;
595 for(i=0; i<s->avctx->thread_count; i++){
596 free_duplicate_context(s->thread_context[i]);
598 for(i=1; i<s->avctx->thread_count; i++){
599 av_freep(&s->thread_context[i]);
602 av_freep(&s->parse_context.buffer);
603 s->parse_context.buffer_size=0;
605 av_freep(&s->mb_type);
606 av_freep(&s->p_mv_table_base);
607 av_freep(&s->b_forw_mv_table_base);
608 av_freep(&s->b_back_mv_table_base);
609 av_freep(&s->b_bidir_forw_mv_table_base);
610 av_freep(&s->b_bidir_back_mv_table_base);
611 av_freep(&s->b_direct_mv_table_base);
612 s->p_mv_table= NULL;
613 s->b_forw_mv_table= NULL;
614 s->b_back_mv_table= NULL;
615 s->b_bidir_forw_mv_table= NULL;
616 s->b_bidir_back_mv_table= NULL;
617 s->b_direct_mv_table= NULL;
618 for(i=0; i<2; i++){
619 for(j=0; j<2; j++){
620 for(k=0; k<2; k++){
621 av_freep(&s->b_field_mv_table_base[i][j][k]);
622 s->b_field_mv_table[i][j][k]=NULL;
624 av_freep(&s->b_field_select_table[i][j]);
625 av_freep(&s->p_field_mv_table_base[i][j]);
626 s->p_field_mv_table[i][j]=NULL;
628 av_freep(&s->p_field_select_table[i]);
631 av_freep(&s->dc_val_base);
632 av_freep(&s->ac_val_base);
633 av_freep(&s->coded_block_base);
634 av_freep(&s->mbintra_table);
635 av_freep(&s->cbp_table);
636 av_freep(&s->pred_dir_table);
638 av_freep(&s->mbskip_table);
639 av_freep(&s->prev_pict_types);
640 av_freep(&s->bitstream_buffer);
641 s->allocated_bitstream_buffer_size=0;
643 av_freep(&s->avctx->stats_out);
644 av_freep(&s->ac_stats);
645 av_freep(&s->error_status_table);
646 av_freep(&s->mb_index2xy);
647 av_freep(&s->lambda_table);
648 av_freep(&s->q_intra_matrix);
649 av_freep(&s->q_inter_matrix);
650 av_freep(&s->q_intra_matrix16);
651 av_freep(&s->q_inter_matrix16);
652 av_freep(&s->input_picture);
653 av_freep(&s->reordered_input_picture);
654 av_freep(&s->dct_offset);
656 if(s->picture){
657 for(i=0; i<MAX_PICTURE_COUNT; i++){
658 free_picture(s, &s->picture[i]);
661 av_freep(&s->picture);
662 s->context_initialized = 0;
663 s->last_picture_ptr=
664 s->next_picture_ptr=
665 s->current_picture_ptr= NULL;
666 s->linesize= s->uvlinesize= 0;
668 for(i=0; i<3; i++)
669 av_freep(&s->visualization_buffer[i]);
671 avcodec_default_free_buffers(s->avctx);
674 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
676 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
677 uint8_t index_run[MAX_RUN+1];
678 int last, run, level, start, end, i;
680 /* If table is static, we can quit if rl->max_level[0] is not NULL */
681 if(static_store && rl->max_level[0])
682 return;
684 /* compute max_level[], max_run[] and index_run[] */
685 for(last=0;last<2;last++) {
686 if (last == 0) {
687 start = 0;
688 end = rl->last;
689 } else {
690 start = rl->last;
691 end = rl->n;
694 memset(max_level, 0, MAX_RUN + 1);
695 memset(max_run, 0, MAX_LEVEL + 1);
696 memset(index_run, rl->n, MAX_RUN + 1);
697 for(i=start;i<end;i++) {
698 run = rl->table_run[i];
699 level = rl->table_level[i];
700 if (index_run[run] == rl->n)
701 index_run[run] = i;
702 if (level > max_level[run])
703 max_level[run] = level;
704 if (run > max_run[level])
705 max_run[level] = run;
707 if(static_store)
708 rl->max_level[last] = static_store[last];
709 else
710 rl->max_level[last] = av_malloc(MAX_RUN + 1);
711 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
712 if(static_store)
713 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
714 else
715 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
716 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
717 if(static_store)
718 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
719 else
720 rl->index_run[last] = av_malloc(MAX_RUN + 1);
721 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
725 void init_vlc_rl(RLTable *rl)
727 int i, q;
729 for(q=0; q<32; q++){
730 int qmul= q*2;
731 int qadd= (q-1)|1;
733 if(q==0){
734 qmul=1;
735 qadd=0;
737 for(i=0; i<rl->vlc.table_size; i++){
738 int code= rl->vlc.table[i][0];
739 int len = rl->vlc.table[i][1];
740 int level, run;
742 if(len==0){ // illegal code
743 run= 66;
744 level= MAX_LEVEL;
745 }else if(len<0){ //more bits needed
746 run= 0;
747 level= code;
748 }else{
749 if(code==rl->n){ //esc
750 run= 66;
751 level= 0;
752 }else{
753 run= rl->table_run [code] + 1;
754 level= rl->table_level[code] * qmul + qadd;
755 if(code >= rl->last) run+=192;
758 rl->rl_vlc[q][i].len= len;
759 rl->rl_vlc[q][i].level= level;
760 rl->rl_vlc[q][i].run= run;
765 int ff_find_unused_picture(MpegEncContext *s, int shared){
766 int i;
768 if(shared){
769 for(i=0; i<MAX_PICTURE_COUNT; i++){
770 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
772 }else{
773 for(i=0; i<MAX_PICTURE_COUNT; i++){
774 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
776 for(i=0; i<MAX_PICTURE_COUNT; i++){
777 if(s->picture[i].data[0]==NULL) return i;
781 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
782 /* We could return -1, but the codec would crash trying to draw into a
783 * non-existing frame anyway. This is safer than waiting for a random crash.
784 * Also the return of this is never useful, an encoder must only allocate
785 * as much as allowed in the specification. This has no relationship to how
786 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
787 * enough for such valid streams).
788 * Plus, a decoder has to check stream validity and remove frames if too
789 * many reference frames are around. Waiting for "OOM" is not correct at
790 * all. Similarly, missing reference frames have to be replaced by
791 * interpolated/MC frames, anything else is a bug in the codec ...
793 abort();
794 return -1;
797 static void update_noise_reduction(MpegEncContext *s){
798 int intra, i;
800 for(intra=0; intra<2; intra++){
801 if(s->dct_count[intra] > (1<<16)){
802 for(i=0; i<64; i++){
803 s->dct_error_sum[intra][i] >>=1;
805 s->dct_count[intra] >>= 1;
808 for(i=0; i<64; i++){
809 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
815 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
817 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
819 int i;
820 AVFrame *pic;
821 s->mb_skipped = 0;
823 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
825 /* mark&release old frames */
826 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
827 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
828 avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
830 /* release forgotten pictures */
831 /* if(mpeg124/h263) */
832 if(!s->encoding){
833 for(i=0; i<MAX_PICTURE_COUNT; i++){
834 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
835 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
836 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
842 alloc:
843 if(!s->encoding){
844 /* release non reference frames */
845 for(i=0; i<MAX_PICTURE_COUNT; i++){
846 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
847 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
851 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
852 pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
853 else{
854 i= ff_find_unused_picture(s, 0);
855 pic= (AVFrame*)&s->picture[i];
858 pic->reference= 0;
859 if (!s->dropable){
860 if (s->codec_id == CODEC_ID_H264)
861 pic->reference = s->picture_structure;
862 else if (s->pict_type != FF_B_TYPE)
863 pic->reference = 3;
866 pic->coded_picture_number= s->coded_picture_number++;
868 if( alloc_picture(s, (Picture*)pic, 0) < 0)
869 return -1;
871 s->current_picture_ptr= (Picture*)pic;
872 s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
873 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
876 s->current_picture_ptr->pict_type= s->pict_type;
877 // if(s->flags && CODEC_FLAG_QSCALE)
878 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
879 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
881 copy_picture(&s->current_picture, s->current_picture_ptr);
883 if (s->pict_type != FF_B_TYPE) {
884 s->last_picture_ptr= s->next_picture_ptr;
885 if(!s->dropable)
886 s->next_picture_ptr= s->current_picture_ptr;
888 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
889 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
890 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
891 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
892 s->pict_type, s->dropable);*/
894 if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
895 if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
897 if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
898 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
899 assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference
900 goto alloc;
903 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
905 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
906 int i;
907 for(i=0; i<4; i++){
908 if(s->picture_structure == PICT_BOTTOM_FIELD){
909 s->current_picture.data[i] += s->current_picture.linesize[i];
911 s->current_picture.linesize[i] *= 2;
912 s->last_picture.linesize[i] *=2;
913 s->next_picture.linesize[i] *=2;
917 s->hurry_up= s->avctx->hurry_up;
918 s->error_resilience= avctx->error_resilience;
920 /* set dequantizer, we can't do it during init as it might change for mpeg4
921 and we can't do it in the header decode as init is not called for mpeg4 there yet */
922 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
923 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
924 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
925 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
926 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
927 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
928 }else{
929 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
930 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
933 if(s->dct_error_sum){
934 assert(s->avctx->noise_reduction && s->encoding);
936 update_noise_reduction(s);
939 #ifdef HAVE_XVMC
940 if(s->avctx->xvmc_acceleration)
941 return XVMC_field_start(s, avctx);
942 #endif
943 return 0;
/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
#ifdef HAVE_XVMC
//just to make sure that all data is rendered.
    if(s->avctx->xvmc_acceleration){
        XVMC_field_end(s);
    }else
#endif
    /* pad the borders of a reference frame so motion compensation may read
       slightly outside the picture (skipped when the caller asked for
       software edge emulation instead) */
    if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c(); //leave the FPU/MMX state clean for whatever runs next

    /* remember per-type state used by rate control / the next frame's headers */
    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=FF_B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
 * draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    /* clip both endpoints into the picture */
    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color; //mark the start point

    if(FFABS(ex - sx) > FFABS(ey - sy)){ //mostly horizontal: step along x
        if(sx > ex){ //make sure we walk left -> right
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex; //slope in 16.16 fixed point; ex>=1 here since |dx|>|dy|
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF; //fractional part -> anti-aliasing weight
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            //NOTE(review): when the line touches the bottom clipped row this
            //writes one row below y (with weight fr); presumably callers keep a
            //margin or fr is 0 there -- verify before relying on it
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{ //mostly vertical: step along y
        if(sy > ey){ //walk top -> bottom
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey; //slope in 16.16 fixed point
        else   f= 0;                //degenerate single-point line
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF; //fractional part -> anti-aliasing weight
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
1047 * draws an arrow from (ex, ey) -> (sx, sy).
1048 * @param w width of the image
1049 * @param h height of the image
1050 * @param stride stride/linesize of the image
1051 * @param color color of the arrow
1053 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1054 int dx,dy;
1056 sx= av_clip(sx, -100, w+100);
1057 sy= av_clip(sy, -100, h+100);
1058 ex= av_clip(ex, -100, w+100);
1059 ey= av_clip(ey, -100, h+100);
1061 dx= ex - sx;
1062 dy= ey - sy;
1064 if(dx*dx + dy*dy > 3*3){
1065 int rx= dx + dy;
1066 int ry= -dx + dy;
1067 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1069 //FIXME subpixel accuracy
1070 rx= ROUNDED_DIV(rx*3<<4, length);
1071 ry= ROUNDED_DIV(ry*3<<4, length);
1073 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1074 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1076 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
 * prints debugging info for the given picture.
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(!pict || !pict->mb_type) return; //nothing to print without MB metadata

    /* part 1: textual per-macroblock dump (skip counter, qscale, MB type) */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9; //clamp to a single printed digit
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">"); //forward prediction only
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<"); //backward prediction only
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X"); //bidirectional
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    //interlacing marker (only meaningful for H.264 here)
                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
//                    av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* part 2: visual overlays (motion vectors, qp, MB type) drawn into a copy
       of the picture so the decoder's own buffers are not trashed */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* draw into a private copy of the three planes */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    /* type selects which MV set to draw: P forward,
                       B forward, B backward */
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, anchored at the partition center */
                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2; //field MVs are in field coordinates

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2; //field MVs are in field coordinates

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    /* paint the chroma planes with a grey level proportional to qscale */
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
                    /* pick a chroma hue on the color circle for this MB type */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));

                    u=v=128; //default: neutral grey
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
//                        COLOR(120,48)
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
//                        COLOR(180,48)
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL; //replicate the byte over 8 pixels
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<8; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
                    }

                    //segmentation: invert luma along the partition split lines
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            //FIXME bidir
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            //mark sub-splits whose halves actually differ in MV
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        // hmm
                    }
                }
                s->mbskip_table[mb_index]=0; //reset so the skip view stays per-frame
            }
        }
    }
}
/**
 * half-pel luma motion compensation for one block, lowres variant.
 * Fetches a w x h block from src at (src_x,src_y) displaced by
 * (motion_x,motion_y) and filters it into dest via pix_op.
 * @return 1 if edge emulation (ff_emulated_edge_mc) was needed, else 0
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int s_mask= (2<<lowres)-1; //mask selecting the sub-pel fraction of an MV
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){ //qpel MVs are treated at half-pel precision in lowres
        motion_x/=2;
        motion_y/=2;
    }

    sx= motion_x & s_mask; //sub-pel fraction
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1); //integer part, scaled down for lowres
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        //block reaches outside the (padded) picture -> replicate border pixels
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    sx <<= 2 - lowres; //rescale fraction to the 1/8-pel units pix_op expects
    sy <<= 2 - lowres;
    if(field_select)
        src += s->linesize; //start on the bottom field
    pix_op[lowres](dest, src, stride, h, sx, sy);
    return emu;
}
/* apply one mpeg motion vector to the three components,
   lowres variant: all positions and fractions are scaled by s->avctx->lowres */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres; //downscaled 8x8 block size
    const int s_mask= (2<<lowres)-1; //mask for the sub-pel MV fraction
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel wont work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        //compensate for the vertical offset between the two fields
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* derive the chroma position/fraction per output format */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s + (my >> lowres);
    } else { //MPEG-1/2 style: chroma MV = luma MV / 2
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        //the block reads outside the picture -> emulate the edges
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
            ff_emulated_edge_mc(uvbuf   , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf+16;
        }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){ //predict from the bottom field of the reference
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    sx <<= 2 - lowres; //rescale fractions to the 1/8-pel units pix_op expects
    sy <<= 2 - lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy); //luma block is twice as wide

    if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
/**
 * chroma motion compensation for 4MV (four luma vectors) macroblocks,
 * lowres variant: averages the four vectors into one chroma vector and
 * filters both chroma planes with it.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1; //mask for the sub-pel MV fraction
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){ //qpel MVs are treated at half-pel precision in lowres
        mx/=2;
        my/=2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            //block reads outside the picture -> replicate border pixels
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    sx <<= 2 - lowres; //rescale fraction to the 1/8-pel units pix_op expects
    sy <<= 2 - lowres;
    pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if(emu){ //Cr needs edge emulation iff Cb did (same geometry)
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1530 * motion compensation of a single macroblock
1531 * @param s context
1532 * @param dest_y luma destination pointer
1533 * @param dest_cb chroma cb/u destination pointer
1534 * @param dest_cr chroma cr/v destination pointer
1535 * @param dir direction (0->forward, 1->backward)
1536 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
1538 * the motion vectors are taken from s->mv and the MV type from s->mv_type
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres; //downscaled 8x8 block size

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    /* dispatch on the macroblock's motion partitioning */
    switch(s->mv_type) {
    case MV_TYPE_16X16: //one vector for the whole MB
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        break;
    case MV_TYPE_8X8: //four luma vectors; chroma uses their rounded sum
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                        ref_picture[0], 0, 0,
                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                        block_s, block_s, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0]; //accumulate for the chroma vector
            my += s->mv[dir][i][1];
        }

        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD: //one vector per field
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s);
        } else {
            /* second field of the current frame predicts from the first one */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        }
        break;
    case MV_TYPE_16X8: //two vectors, each covering 16x8 (field pictures)
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* same-frame reference for the second field where applicable */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV: //dual prime: put first prediction, then average the second
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
1655 /* put block[] to dest[] */
1656 static inline void put_dct(MpegEncContext *s,
1657 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1659 s->dct_unquantize_intra(s, block, i, qscale);
1660 s->dsp.idct_put (dest, line_size, block);
1663 /* add block[] to dest[] */
1664 static inline void add_dct(MpegEncContext *s,
1665 DCTELEM *block, int i, uint8_t *dest, int line_size)
1667 if (s->block_last_index[i] >= 0) {
1668 s->dsp.idct_add (dest, line_size, block);
1672 static inline void add_dequant_dct(MpegEncContext *s,
1673 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1675 if (s->block_last_index[i] >= 0) {
1676 s->dct_unquantize_inter(s, block, i, qscale);
1678 s->dsp.idct_add (dest, line_size, block);
1683 * cleans dc, ac, coded_block for the current non intra MB
1685 void ff_clean_intra_table_entries(MpegEncContext *s)
1687 int wrap = s->b8_stride;
1688 int xy = s->block_index[0];
1690 s->dc_val[0][xy ] =
1691 s->dc_val[0][xy + 1 ] =
1692 s->dc_val[0][xy + wrap] =
1693 s->dc_val[0][xy + 1 + wrap] = 1024;
1694 /* ac pred */
1695 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1696 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1697 if (s->msmpeg4_version>=3) {
1698 s->coded_block[xy ] =
1699 s->coded_block[xy + 1 ] =
1700 s->coded_block[xy + wrap] =
1701 s->coded_block[xy + 1 + wrap] = 0;
1703 /* chroma */
1704 wrap = s->mb_stride;
1705 xy = s->mb_x + s->mb_y * wrap;
1706 s->dc_val[1][xy] =
1707 s->dc_val[2][xy] = 1024;
1708 /* ac pred */
1709 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1710 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1712 s->mbintra_table[xy]= 0;
1715 /* generic function called after a macroblock has been parsed by the
1716 decoder or after it has been encoded by the encoder.
1718 Important variables used:
1719 s->mb_intra : true if intra macroblock
1720 s->mv_dir : motion vector direction
1721 s->mv_type : motion vector type
1722 s->mv : motion vector
1723 s->interlaced_dct : true if interlaced dct used (mpeg2)
1725 static av_always_inline
1726 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1727 int lowres_flag, int is_mpeg12)
1729 int mb_x, mb_y;
1730 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1731 #ifdef HAVE_XVMC
1732 if(s->avctx->xvmc_acceleration){
1733 XVMC_decode_mb(s);//xvmc uses pblocks
1734 return;
1736 #endif
1738 mb_x = s->mb_x;
1739 mb_y = s->mb_y;
1741 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1742 /* save DCT coefficients */
1743 int i,j;
1744 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1745 for(i=0; i<6; i++)
1746 for(j=0; j<64; j++)
1747 *dct++ = block[i][s->dsp.idct_permutation[j]];
1750 s->current_picture.qscale_table[mb_xy]= s->qscale;
1752 /* update DC predictors for P macroblocks */
1753 if (!s->mb_intra) {
1754 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1755 if(s->mbintra_table[mb_xy])
1756 ff_clean_intra_table_entries(s);
1757 } else {
1758 s->last_dc[0] =
1759 s->last_dc[1] =
1760 s->last_dc[2] = 128 << s->intra_dc_precision;
1763 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1764 s->mbintra_table[mb_xy]=1;
1766 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1767 uint8_t *dest_y, *dest_cb, *dest_cr;
1768 int dct_linesize, dct_offset;
1769 op_pixels_func (*op_pix)[4];
1770 qpel_mc_func (*op_qpix)[16];
1771 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1772 const int uvlinesize= s->current_picture.linesize[1];
1773 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1774 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1776 /* avoid copy if macroblock skipped in last frame too */
1777 /* skip only during decoding as we might trash the buffers during encoding a bit */
1778 if(!s->encoding){
1779 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1780 const int age= s->current_picture.age;
1782 assert(age);
1784 if (s->mb_skipped) {
1785 s->mb_skipped= 0;
1786 assert(s->pict_type!=FF_I_TYPE);
1788 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1789 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1791 /* if previous was skipped too, then nothing to do ! */
1792 if (*mbskip_ptr >= age && s->current_picture.reference){
1793 return;
1795 } else if(!s->current_picture.reference){
1796 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1797 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1798 } else{
1799 *mbskip_ptr = 0; /* not skipped */
1803 dct_linesize = linesize << s->interlaced_dct;
1804 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1806 if(readable){
1807 dest_y= s->dest[0];
1808 dest_cb= s->dest[1];
1809 dest_cr= s->dest[2];
1810 }else{
1811 dest_y = s->b_scratchpad;
1812 dest_cb= s->b_scratchpad+16*linesize;
1813 dest_cr= s->b_scratchpad+32*linesize;
1816 if (!s->mb_intra) {
1817 /* motion handling */
1818 /* decoding or more than one mb_type (MC was already done otherwise) */
1819 if(!s->encoding){
1820 if(lowres_flag){
1821 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1823 if (s->mv_dir & MV_DIR_FORWARD) {
1824 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1825 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1827 if (s->mv_dir & MV_DIR_BACKWARD) {
1828 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
1830 }else{
1831 op_qpix= s->me.qpel_put;
1832 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1833 op_pix = s->dsp.put_pixels_tab;
1834 }else{
1835 op_pix = s->dsp.put_no_rnd_pixels_tab;
1837 if (s->mv_dir & MV_DIR_FORWARD) {
1838 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1839 op_pix = s->dsp.avg_pixels_tab;
1840 op_qpix= s->me.qpel_avg;
1842 if (s->mv_dir & MV_DIR_BACKWARD) {
1843 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1848 /* skip dequant / idct if we are really late ;) */
1849 if(s->hurry_up>1) goto skip_idct;
1850 if(s->avctx->skip_idct){
1851 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1852 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1853 || s->avctx->skip_idct >= AVDISCARD_ALL)
1854 goto skip_idct;
1857 /* add dct residue */
1858 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1859 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1860 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1861 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1862 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1863 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1865 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1866 if (s->chroma_y_shift){
1867 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1868 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1869 }else{
1870 dct_linesize >>= 1;
1871 dct_offset >>=1;
1872 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1873 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1874 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1875 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1878 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1879 add_dct(s, block[0], 0, dest_y , dct_linesize);
1880 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1881 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1882 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1884 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1885 if(s->chroma_y_shift){//Chroma420
1886 add_dct(s, block[4], 4, dest_cb, uvlinesize);
1887 add_dct(s, block[5], 5, dest_cr, uvlinesize);
1888 }else{
1889 //chroma422
1890 dct_linesize = uvlinesize << s->interlaced_dct;
1891 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1893 add_dct(s, block[4], 4, dest_cb, dct_linesize);
1894 add_dct(s, block[5], 5, dest_cr, dct_linesize);
1895 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1896 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1897 if(!s->chroma_x_shift){//Chroma444
1898 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1899 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1900 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1901 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1904 }//fi gray
1906 else if (ENABLE_WMV2) {
1907 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1909 } else {
1910 /* dct only in intra block */
1911 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
1912 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1913 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1914 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1915 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1917 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1918 if(s->chroma_y_shift){
1919 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1920 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1921 }else{
1922 dct_offset >>=1;
1923 dct_linesize >>=1;
1924 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1925 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1926 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1927 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1930 }else{
1931 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
1932 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1933 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
1934 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1936 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1937 if(s->chroma_y_shift){
1938 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
1939 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
1940 }else{
1942 dct_linesize = uvlinesize << s->interlaced_dct;
1943 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1945 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
1946 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
1947 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1948 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1949 if(!s->chroma_x_shift){//Chroma444
1950 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
1951 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
1952 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1953 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1956 }//gray
1959 skip_idct:
1960 if(!readable){
1961 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1962 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1963 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1968 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
1969 #ifndef CONFIG_SMALL
1970 if(s->out_format == FMT_MPEG1) {
1971 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
1972 else MPV_decode_mb_internal(s, block, 0, 1);
1973 } else
1974 #endif
1975 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
1976 else MPV_decode_mb_internal(s, block, 0, 0);
1981 * @param h is the normal height, this will be reduced automatically if needed for the last row
1983 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
1984 if (s->avctx->draw_horiz_band) {
1985 AVFrame *src;
1986 int offset[4];
1988 if(s->picture_structure != PICT_FRAME){
1989 h <<= 1;
1990 y <<= 1;
1991 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
1994 h= FFMIN(h, s->avctx->height - y);
1996 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
1997 src= (AVFrame*)s->current_picture_ptr;
1998 else if(s->last_picture_ptr)
1999 src= (AVFrame*)s->last_picture_ptr;
2000 else
2001 return;
2003 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2004 offset[0]=
2005 offset[1]=
2006 offset[2]=
2007 offset[3]= 0;
2008 }else{
2009 offset[0]= y * s->linesize;
2010 offset[1]=
2011 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2012 offset[3]= 0;
2015 emms_c();
2017 s->avctx->draw_horiz_band(s->avctx, src, offset,
2018 y, s->picture_structure, h);
2022 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2023 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2024 const int uvlinesize= s->current_picture.linesize[1];
2025 const int mb_size= 4 - s->avctx->lowres;
2027 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2028 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2029 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2030 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2031 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2032 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2033 //block_index is not used by mpeg2, so it is not affected by chroma_format
2035 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2036 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2037 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2039 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2041 s->dest[0] += s->mb_y * linesize << mb_size;
2042 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2043 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2047 void ff_mpeg_flush(AVCodecContext *avctx){
2048 int i;
2049 MpegEncContext *s = avctx->priv_data;
2051 if(s==NULL || s->picture==NULL)
2052 return;
2054 for(i=0; i<MAX_PICTURE_COUNT; i++){
2055 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2056 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2057 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
2059 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2061 s->mb_x= s->mb_y= 0;
2063 s->parse_context.state= -1;
2064 s->parse_context.frame_start_found= 0;
2065 s->parse_context.overread= 0;
2066 s->parse_context.overread_index= 0;
2067 s->parse_context.index= 0;
2068 s->parse_context.last_index= 0;
2069 s->bitstream_buffer_size=0;
2070 s->pp_time=0;
2073 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2074 DCTELEM *block, int n, int qscale)
2076 int i, level, nCoeffs;
2077 const uint16_t *quant_matrix;
2079 nCoeffs= s->block_last_index[n];
2081 if (n < 4)
2082 block[0] = block[0] * s->y_dc_scale;
2083 else
2084 block[0] = block[0] * s->c_dc_scale;
2085 /* XXX: only mpeg1 */
2086 quant_matrix = s->intra_matrix;
2087 for(i=1;i<=nCoeffs;i++) {
2088 int j= s->intra_scantable.permutated[i];
2089 level = block[j];
2090 if (level) {
2091 if (level < 0) {
2092 level = -level;
2093 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2094 level = (level - 1) | 1;
2095 level = -level;
2096 } else {
2097 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2098 level = (level - 1) | 1;
2100 block[j] = level;
2105 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2106 DCTELEM *block, int n, int qscale)
2108 int i, level, nCoeffs;
2109 const uint16_t *quant_matrix;
2111 nCoeffs= s->block_last_index[n];
2113 quant_matrix = s->inter_matrix;
2114 for(i=0; i<=nCoeffs; i++) {
2115 int j= s->intra_scantable.permutated[i];
2116 level = block[j];
2117 if (level) {
2118 if (level < 0) {
2119 level = -level;
2120 level = (((level << 1) + 1) * qscale *
2121 ((int) (quant_matrix[j]))) >> 4;
2122 level = (level - 1) | 1;
2123 level = -level;
2124 } else {
2125 level = (((level << 1) + 1) * qscale *
2126 ((int) (quant_matrix[j]))) >> 4;
2127 level = (level - 1) | 1;
2129 block[j] = level;
2134 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2135 DCTELEM *block, int n, int qscale)
2137 int i, level, nCoeffs;
2138 const uint16_t *quant_matrix;
2140 if(s->alternate_scan) nCoeffs= 63;
2141 else nCoeffs= s->block_last_index[n];
2143 if (n < 4)
2144 block[0] = block[0] * s->y_dc_scale;
2145 else
2146 block[0] = block[0] * s->c_dc_scale;
2147 quant_matrix = s->intra_matrix;
2148 for(i=1;i<=nCoeffs;i++) {
2149 int j= s->intra_scantable.permutated[i];
2150 level = block[j];
2151 if (level) {
2152 if (level < 0) {
2153 level = -level;
2154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2155 level = -level;
2156 } else {
2157 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2159 block[j] = level;
2164 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2165 DCTELEM *block, int n, int qscale)
2167 int i, level, nCoeffs;
2168 const uint16_t *quant_matrix;
2169 int sum=-1;
2171 if(s->alternate_scan) nCoeffs= 63;
2172 else nCoeffs= s->block_last_index[n];
2174 if (n < 4)
2175 block[0] = block[0] * s->y_dc_scale;
2176 else
2177 block[0] = block[0] * s->c_dc_scale;
2178 quant_matrix = s->intra_matrix;
2179 for(i=1;i<=nCoeffs;i++) {
2180 int j= s->intra_scantable.permutated[i];
2181 level = block[j];
2182 if (level) {
2183 if (level < 0) {
2184 level = -level;
2185 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2186 level = -level;
2187 } else {
2188 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2190 block[j] = level;
2191 sum+=level;
2194 block[63]^=sum&1;
2197 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2198 DCTELEM *block, int n, int qscale)
2200 int i, level, nCoeffs;
2201 const uint16_t *quant_matrix;
2202 int sum=-1;
2204 if(s->alternate_scan) nCoeffs= 63;
2205 else nCoeffs= s->block_last_index[n];
2207 quant_matrix = s->inter_matrix;
2208 for(i=0; i<=nCoeffs; i++) {
2209 int j= s->intra_scantable.permutated[i];
2210 level = block[j];
2211 if (level) {
2212 if (level < 0) {
2213 level = -level;
2214 level = (((level << 1) + 1) * qscale *
2215 ((int) (quant_matrix[j]))) >> 4;
2216 level = -level;
2217 } else {
2218 level = (((level << 1) + 1) * qscale *
2219 ((int) (quant_matrix[j]))) >> 4;
2221 block[j] = level;
2222 sum+=level;
2225 block[63]^=sum&1;
2228 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2229 DCTELEM *block, int n, int qscale)
2231 int i, level, qmul, qadd;
2232 int nCoeffs;
2234 assert(s->block_last_index[n]>=0);
2236 qmul = qscale << 1;
2238 if (!s->h263_aic) {
2239 if (n < 4)
2240 block[0] = block[0] * s->y_dc_scale;
2241 else
2242 block[0] = block[0] * s->c_dc_scale;
2243 qadd = (qscale - 1) | 1;
2244 }else{
2245 qadd = 0;
2247 if(s->ac_pred)
2248 nCoeffs=63;
2249 else
2250 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2252 for(i=1; i<=nCoeffs; i++) {
2253 level = block[i];
2254 if (level) {
2255 if (level < 0) {
2256 level = level * qmul - qadd;
2257 } else {
2258 level = level * qmul + qadd;
2260 block[i] = level;
2265 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2266 DCTELEM *block, int n, int qscale)
2268 int i, level, qmul, qadd;
2269 int nCoeffs;
2271 assert(s->block_last_index[n]>=0);
2273 qadd = (qscale - 1) | 1;
2274 qmul = qscale << 1;
2276 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2278 for(i=0; i<=nCoeffs; i++) {
2279 level = block[i];
2280 if (level) {
2281 if (level < 0) {
2282 level = level * qmul - qadd;
2283 } else {
2284 level = level * qmul + qadd;
2286 block[i] = level;
2292 * set qscale and update qscale dependent variables.
2294 void ff_set_qscale(MpegEncContext * s, int qscale)
2296 if (qscale < 1)
2297 qscale = 1;
2298 else if (qscale > 31)
2299 qscale = 31;
2301 s->qscale = qscale;
2302 s->chroma_qscale= s->chroma_qscale_table[qscale];
2304 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2305 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];