/*
 * H263/MPEG4 backend for encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mathops.h"
#include "mpeg4video.h"
uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
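
/**
 * Propagate the motion vectors of the current macroblock into the
 * frame-wide motion_val table (one vector per 8x8 block) and update the
 * skip and macroblock type tables. For 8x8 mode the vectors are already
 * written during parsing, so only the other MV types are handled here.
 */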
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
               //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.f.ref_index[0][4*mb_xy    ] =
            s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
            s->current_picture.f.ref_index[0][4*mb_xy + 2] =
            s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.f.motion_val[0][xy][0]            = motion_x;
        s->current_picture.f.motion_val[0][xy][1]            = motion_y;
        s->current_picture.f.motion_val[0][xy + 1][0]        = motion_x;
        s->current_picture.f.motion_val[0][xy + 1][1]        = motion_y;
        s->current_picture.f.motion_val[0][xy + wrap][0]     = motion_x;
        s->current_picture.f.motion_val[0][xy + wrap][1]     = motion_y;
        s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
        else
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
    }
}
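
/**
 * Predict the DC coefficient of block n from its left (a) and top (c)
 * neighbours; 1024 is the reset value used for unavailable neighbours.
 *
 * @param n          block index (0-3 luma, 4-5 chroma)
 * @param dc_val_ptr receives a pointer to the slot where the DC value of
 *                   the current block has to be stored
 * @return the predicted DC value
 */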
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }

    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];

    return pred_dc;
}
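
/**
 * Apply the H.263 in-loop deblocking filter (Annex J) to the left, top and
 * internal 8x8 edges of the current macroblock. Each edge uses the current
 * macroblock's quantizer, falling back to the neighbour's quantizer when
 * the current macroblock is skipped; edges between two skipped macroblocks
 * are not filtered.
 */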
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

//    if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;

    if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
        qp_c= s->qscale;
        s->dsp.h263_v_loop_filter(dest_y+8*linesize  , linesize, qp_c);
        s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
            qp_tt= 0;
        else
            qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->dsp.h263_v_loop_filter(dest_y  , linesize, qp_tc);
            s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);

            s->dsp.h263_v_loop_filter(dest_cb, uvlinesize, chroma_qp);
            s->dsp.h263_v_loop_filter(dest_cr, uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->dsp.h263_h_loop_filter(dest_y-8*linesize+8, linesize, qp_tt);

        if(s->mb_x){
            if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->dsp.h263_h_loop_filter(dest_y -8*linesize  ,   linesize, qp_dt);
                s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
                s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->dsp.h263_h_loop_filter(dest_y+8, linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
            qp_lc= qp_c;
        else
            qp_lc = s->current_picture.f.qscale_table[xy - 1];

        if(qp_lc){
            s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->dsp.h263_h_loop_filter(dest_y+8*linesize, linesize, qp_lc);
                s->dsp.h263_h_loop_filter(dest_cb, uvlinesize, chroma_qp);
                s->dsp.h263_h_loop_filter(dest_cr, uvlinesize, chroma_qp);
            }
        }
    }
}
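
/**
 * Perform DC and, with AC prediction enabled, first row/column AC
 * prediction for block n as used by H.263 advanced intra coding, then
 * store the reconstructed DC and the first row/column of coefficients
 * for prediction of later blocks.
 */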
void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->dsp.idct_permutation[i   ]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i    ] = block[s->dsp.idct_permutation[i<<3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->dsp.idct_permutation[i   ]];
}
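
/**
 * Compute the median motion vector predictor of a block from its left (A),
 * top (B) and top-right (C) neighbours, with the special cases needed on
 * the first (slice) line and at resync/GOB boundaries.
 *
 * @param block index of the 8x8 block inside the macroblock (0-3)
 * @param dir   0 for forward, 1 for backward prediction
 * @param px    receives the predicted x component
 * @param py    receives the predicted y component
 * @return pointer to the motion vector of the current block
 */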
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
        // and if we ever support non rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2 */
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}
/**
 * Get the GOB height based on picture height.
 */
int ff_h263_get_gob_height(MpegEncContext *s){
    if (s->height <= 400)
        return 1;
    else if (s->height <= 800)
        return 2;
    else
        return 4;
}