/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * VC-1 and WMV3 decoder
 */
#include "error_resilience.h"
#include "mpegvideo.h"
#include "h264chroma.h"
#include "vc1acdata.h"
#include "msmpeg4data.h"
#include "vdpau_internal.h"

#define MB_INTRA_VLC_BITS 9
// offset tables for interlaced picture MVDATA decoding
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
/***********************************************************************/
/**
 * @name VC-1 Bitplane decoding
 * @{
 */

/** @} */ //imode defines

/** @} */ //Bitplane group
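/* Note: v->topleft_blk_idx, top_blk_idx, left_blk_idx and cur_blk_idx below index
 * a small ring of block buffers (rotated by inc_blk_idx() at the end of
 * vc1_put_signed_blocks_clamped()), which is what lets the put-pixels and
 * filtering passes trail the decoding loop by one MB row and column. */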
static void vc1_put_signed_blocks_clamped(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx;
    int v_dist;

    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * present as well.
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
            fieldtx        = v->fieldtx_plane[topleft_mb_pos];
            stride_y       = s->linesize << fieldtx;
            v_dist         = (16 - fieldtx) >> (fieldtx == 0);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize - 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize - 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
        }
        if (s->mb_x == s->mb_width - 1) {
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            fieldtx    = v->fieldtx_plane[top_mb_pos];
            stride_y   = s->linesize << fieldtx;
            v_dist     = fieldtx ? 15 : 8;
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize,
                                             s->uvlinesize);
        }
    }

#define inc_blk_idx(idx) do { \
        idx++; \
        if (idx >= v->n_allocated_blks) \
            idx = 0; \
    } while (0)

    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
}
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;

    if (!s->first_slice_line) {
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        if (s->mb_x)
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            if (s->mb_x)
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
        }
    }
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);

    if (s->mb_y == s->end_mb_y - 1) {
        if (s->mb_x) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        }
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
    }
}
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;

    /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
     * means it runs two rows/cols behind the decoding loop. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);

                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                    if (s->mb_x >= 2)
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
                }
            }
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
        }

        if (s->mb_x == s->mb_width - 1) {
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);

                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    if (s->mb_x)
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
                }
            }
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
        }
    }

    if (s->mb_y == s->end_mb_y) {
        if (s->mb_x) {
            if (s->mb_x >= 2)
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {
                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
            }
        }

        if (s->mb_x == s->mb_width - 1) {
            if (s->mb_x)
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
            for (j = 0; j < 2; j++) {
                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
            }
        }
    }
}
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int mb_pos;

    if (v->condover == CONDOVER_NONE)
        return;

    mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Within a MB, the horizontal overlap always runs before the vertical.
     * To accomplish that, we run the H on left and internal borders of the
     * currently decoded MB. Then, we wait for the next overlap iteration
     * to do H overlap on the right edge of this MB, before moving over and
     * running the V overlap. Therefore, the V overlap makes us trail by one
     * MB col and the H overlap filter makes us trail by one MB row. This
     * is reflected in the time at which we run the put_pixels loop. */
    if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
        if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                        v->over_flags_plane[mb_pos - 1])) {
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
                                      v->block[v->cur_blk_idx][2]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
            }
        }
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][1]);
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
                                  v->block[v->cur_blk_idx][3]);

        if (s->mb_x == s->mb_width - 1) {
            if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                         v->over_flags_plane[mb_pos - s->mb_stride])) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
                                          v->block[v->cur_blk_idx][0]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
                                          v->block[v->cur_blk_idx][1]);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
                                              v->block[v->cur_blk_idx][4]);
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
                                              v->block[v->cur_blk_idx][5]);
                }
            }
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
                                      v->block[v->cur_blk_idx][2]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
                                      v->block[v->cur_blk_idx][3]);
        }
    }
    if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
                                      v->block[v->left_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
                                      v->block[v->left_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
                                          v->block[v->left_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
                                          v->block[v->left_blk_idx][5]);
            }
        }
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
                                  v->block[v->left_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
                                  v->block[v->left_blk_idx][3]);
    }
}
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int off, off_uv;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
    }

    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        my   = my   - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    if (v->field_mode) { // interlaced field picture
        if (!dir) {
            if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
                srcY = s->current_picture.f.data[0];
                srcU = s->current_picture.f.data[1];
                srcV = s->current_picture.f.data[2];
            } else {
                srcY = s->last_picture.f.data[0];
                srcU = s->last_picture.f.data[1];
                srcV = s->last_picture.f.data[2];
            }
        } else {
            srcY = s->next_picture.f.data[0];
            srcU = s->next_picture.f.data[1];
            srcV = s->next_picture.f.data[2];
        }
    } else {
        if (!dir) {
            srcY = s->last_picture.f.data[0];
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
        } else {
            srcY = s->next_picture.f.data[0];
            srcU = s->next_picture.f.data[1];
            srcV = s->next_picture.f.data[2];
        }
    }

    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    } else {
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1)        > v_edge_pos    - (my&3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU, s->uvlinesize, 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = v->luty[src[i]];
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if (v->field_mode && v->cur_field_type) {
        off    = s->current_picture_ptr->f.linesize[0];
        off_uv = s->current_picture_ptr->f.linesize[1];
    } else {
        off    = 0;
        off_uv = 0;
    }
    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
        else
            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
static inline int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
/** Do motion compensation for 4-MV macroblock - luminance block
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];

    if (!dir) {
        if (v->field_mode) {
            if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
                srcY = s->current_picture.f.data[0];
            else
                srcY = s->last_picture.f.data[0];
        } else
            srcY = s->last_picture.f.data[0];
    } else
        srcY = s->next_picture.f.data[0];

    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;
    }

    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        int tx, ty;
        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
            opp_count  += f;
            same_count += 1 - f;
        }
        f = opp_count > same_count;
        switch (f ? opp_count : same_count) {
        case 4:
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            break;
        case 3:
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            break;
        case 2:
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
            break;
        }
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    }

    if (v->fcm == ILACE_FRAME) {  // not sure if needed for other types of picture
        int qx, qy;
        int width  = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y *  8) + (my >> 3);

        if (qx < -17)
            mx -= 4 * (qx + 17);
        else if (qx > width)
            mx -= 4 * (qx - width);
        if (qy < -18)
            my -= 8 * (qy + 18);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    }

    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
    else
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
    if (v->field_mode && v->cur_field_type)
        off += s->current_picture_ptr->f.linesize[0];

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
    if (!fieldmv)
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
    else
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width  * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            if (src_y & 1)
                src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            else
                src_y = av_clip(src_y, -18, s->avctx->coded_height);
        } else {
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        }
    }

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f.linesize[0];

    if (fieldmv && !(src_y & 1))
        v_edge_pos--;
    if (fieldmv && (src_y & 1) && src_y < 4)
        src_y--;
    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
                                 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
                                 src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize << fieldmv;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = v->luty[src[i]];
                src += s->linesize << fieldmv;
            }
        }
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
    }

    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}
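/* In get_chroma_mv() below, idx gets one bit per luma block whose flag a[i]
 * differs from 'flag' and count[idx] is the number of such mismatches: with none
 * the chroma MV is the median4 of all four luma MVs, with one it is the mid_pred
 * of the remaining three, and with two it is the average of the two matching
 * blocks. */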
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
{
    int idx, i;
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };

    idx =  ((a[3] != flag) << 3)
         | ((a[2] != flag) << 2)
         | ((a[1] != flag) << 1)
         |  (a[0] != flag);
    if (!idx) {
        *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
        return 4;
    } else if (count[idx] == 1) {
        switch (idx) {
        case 0x1:
            *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
            break;
        case 0x2:
            *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
            break;
        case 0x4:
            *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
            break;
        case 0x8:
            *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
            break;
        }
        return 3;
    } else if (count[idx] == 2) {
        int t1 = 0, t2 = 0;
        for (i = 0; i < 3; i++)
            if (!((idx >> i) & 1)) {
                t1 = i;
                break;
            }
        for (i = t1 + 1; i < 4; i++)
            if (!((idx >> i) & 1)) {
                t2 = i;
                break;
            }
        *tx = (mvx[t1] + mvx[t2]) / 2;
        *ty = (mvy[t1] + mvy[t2]) / 2;
        return 2;
    } else {
        return 0;
    }
}
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int valid_count;
    int chroma_ref_type = v->cur_field_type, off = 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;

    if (!v->field_mode && !v->s.last_picture.f.data[0])
        return;
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    for (k = 0; k < 4; k++) {
        mvx[k]   = s->mv[dir][k][0];
        mvy[k]   = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
        if (v->field_mode)
            mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
    }

    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
        chroma_ref_type = v->reffield;
        if (!valid_count) {
            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        }
    } else {
        int dominant = 0;
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
            dominant = 1;
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
        if (dominant)
            chroma_ref_type = !v->cur_field_type;
    }
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
        return;
    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->fastuvmc) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width  * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    if (!dir) {
        if (v->field_mode) {
            if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
                srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
                srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            } else {
                srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
                srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            }
        } else {
            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
        }
    } else {
        srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
    }

    if (v->field_mode) {
        if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f.linesize[1];
            srcV += s->current_picture_ptr->f.linesize[2];
        }
        off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
    }

    if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos    >> 1) - 9) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
    }

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
/** Do motion compensation for 4-MV field chroma macroblock (both U and V)
 */
static void vc1_mc_4mv_chroma4(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvsrc_x, uvsrc_y;
    int uvmx_field[4], uvmy_field[4];
    int i, off, tx, ty;
    int fieldmv = v->blk_mv_type[s->block_index[0]];
    static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
    int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
    int v_edge_pos = s->v_edge_pos >> 1;

    if (!v->s.last_picture.f.data[0])
        return;
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    for (i = 0; i < 4; i++) {
        tx = s->mv[0][i][0];
        uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
        ty = s->mv[0][i][1];
        if (fieldmv)
            uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
        else
            uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
    }

    for (i = 0; i < 4; i++) {
        off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
        uvsrc_x = s->mb_x * 8 + (i & 1) * 4           + (uvmx_field[i] >> 2);
        uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
        // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
        srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
        uvmx_field[i] = (uvmx_field[i] & 3) << 1;
        uvmy_field[i] = (uvmy_field[i] & 3) << 1;

        if (fieldmv && !(uvsrc_y & 1))
            v_edge_pos--;
        if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
            uvsrc_y--;
        if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
            || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
            || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            srcU = s->edge_emu_buffer;
            srcV = s->edge_emu_buffer + 16;

            /* if we deal with intensity compensation we need to scale source blocks */
            if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
                int i, j;
                uint8_t *src, *src2;

                src  = srcU;
                src2 = srcV;
                for (j = 0; j < 5; j++) {
                    for (i = 0; i < 5; i++) {
                        src[i]  = v->lutuv[src[i]];
                        src2[i] = v->lutuv[src2[i]];
                    }
                    src  += s->uvlinesize << 1;
                    src2 += s->uvlinesize << 1;
                }
            }
        }
        if (!v->rnd) {
            h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
        } else {
            v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
        }
    }
}
/***********************************************************************/
/**
 * @name VC-1 Block-level functions
 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
 * @{
 */

/**
 * @def GET_MQUANT
 * @brief Get macroblock-level quantizer scale
 */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
/**
 * @def GET_MVDATA(_dmv_x, _dmv_y)
 * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p(1)20
 * @param _dmv_x Horizontal differential for decoded MV
 * @param _dmv_y Vertical differential for decoded MV
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                      \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                      \
    if (index > 36) {                                                   \
        mb_has_coeffs = 1;                                              \
        index -= 37;                                                    \
    } else                                                              \
        mb_has_coeffs = 0;                                              \
    s->mb_intra = 0;                                                    \
    if (!index) {                                                       \
        _dmv_x = _dmv_y = 0;                                            \
    } else if (index == 35) {                                           \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
    } else if (index == 36) {                                           \
        _dmv_x = 0;                                                     \
        _dmv_y = 0;                                                     \
        s->mb_intra = 1;                                                \
    } else {                                                            \
        index1 = index % 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_x = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
                                                                        \
        index1 = index / 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
    }
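/* In GET_MVDATA() above, index 0 yields a zero differential, index 35 reads raw
 * k_x/k_y-bit values, and the remaining indices are split into index % 6 and
 * index / 6 size classes that select size_table/offset_table entries for the
 * horizontal and vertical components; indices above 36 additionally mark that
 * the MB has coded coefficients (mb_has_coeffs). */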
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
                                                   int *dmv_y, int *pred_flag)
{
    int index, index1;
    int extend_x = 0, extend_y = 0;
    GetBitContext *gb = &v->s.gb;
    int bits, esc;
    int val, sign;
    const int* offs_tab;

    if (v->numref) {
        bits = VC1_2REF_MVDATA_VLC_BITS;
        esc  = 125;
    } else {
        bits = VC1_1REF_MVDATA_VLC_BITS;
        esc  = 71;
    }
    switch (v->dmvrange) {
    case 1:
        extend_x = 1;
        break;
    case 2:
        extend_y = 1;
        break;
    case 3:
        extend_x = extend_y = 1;
        break;
    }
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
    if (index == esc) {
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
        if (v->numref) {
            *pred_flag = *dmv_y & 1;
            *dmv_y     = (*dmv_y + *pred_flag) >> 1;
        } else {
            *dmv_y     = (*dmv_y + (*dmv_y & 1)) >> 1;
        }
    } else {
        if (extend_x)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) % 9;
        if (index1 != 0) {
            val    = get_bits(gb, index1 + extend_x);
            sign   = 0 - (val & 1);
            *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
        } else
            *dmv_x = 0;
        if (extend_y)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) / 9;
        if (index1 > v->numref) {
            val    = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
            sign   = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
        } else
            *dmv_y = 0;
        if (v->numref && pred_flag)
            *pred_flag = index1 & 1;
    }
}
static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_x, zone1offset_x;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_x  = ff_vc1_field_mvpred_scales[table_index][3][refdist];
    zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
{
    int scaledvalue, refdist;
    int scalesame1, scalesame2;
    int scalezone1_y, zone1offset_y;
    int table_index = dir ^ v->second_field;

    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = v->refdist;
    else
        refdist = dir ? v->brfd : v->frfd;
    if (refdist > 3)
        refdist = 3;
    scalesame1    = ff_vc1_field_mvpred_scales[table_index][1][refdist];
    scalesame2    = ff_vc1_field_mvpred_scales[table_index][2][refdist];
    scalezone1_y  = ff_vc1_field_mvpred_scales[table_index][4][refdist];
    zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scalesame1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
        }
    }

    if (v->cur_field_type && !v->ref_field_type[dir])
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    else
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
}
static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
{
    int scalezone1_x, zone1offset_x;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_x  = ff_vc1_b_field_mvpred_scales[3][brfd];
    zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 255)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_x)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
        }
    }
    return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
}
static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
{
    int scalezone1_y, zone1offset_y;
    int scaleopp1, scaleopp2, brfd;
    int scaledvalue;

    brfd = FFMIN(v->brfd, 3);
    scalezone1_y  = ff_vc1_b_field_mvpred_scales[4][brfd];
    zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
    scaleopp1     = ff_vc1_b_field_mvpred_scales[1][brfd];
    scaleopp2     = ff_vc1_b_field_mvpred_scales[2][brfd];

    if (FFABS(n) > 63)
        scaledvalue = n;
    else {
        if (FFABS(n) < scalezone1_y)
            scaledvalue = (n * scaleopp1) >> 8;
        else {
            if (n < 0)
                scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
            else
                scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
        }
    }
    if (v->cur_field_type && !v->ref_field_type[dir]) {
        return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
    } else {
        return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
    }
}
static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
                                         int dim, int dir)
{
    int brfd, scalesame;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
        if (dim)
            n = scaleforsame_y(v, i, n, dir) << hpel;
        else
            n = scaleforsame_x(v, n, dir) << hpel;
        return n;
    }
    brfd      = FFMIN(v->brfd, 3);
    scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];

    n = (n * scalesame >> 8) << hpel;
    return n;
}
static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
                                        int dim, int dir)
{
    int refdist, scaleopp;
    int hpel = 1 - v->s.quarter_sample;

    n >>= hpel;
    if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
        if (dim)
            n = scaleforopp_y(v, n, dir) << hpel;
        else
            n = scaleforopp_x(v, n) << hpel;
        return n;
    }
    if (v->s.pict_type != AV_PICTURE_TYPE_B)
        refdist = FFMIN(v->refdist, 3);
    else
        refdist = dir ? v->brfd : v->frfd;
    scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];

    n = (n * scaleopp >> 8) << hpel;
    return n;
}
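/* The scalefor{same,opp}_{x,y} helpers above rescale a neighbouring MV predictor
 * to the current field's reference distance using the
 * ff_vc1_field_mvpred_scales[] / ff_vc1_b_field_mvpred_scales[] tables: values
 * below the zone-1 threshold use the first scale factor, larger ones use the
 * second one plus a zone offset, and the result is clipped to the MV range. */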
/** Predict and set motion vector
 */
static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                               int mv1, int r_x, int r_y, uint8_t* is_intra,
                               int pred_flag, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
    int opposite, a_f, b_f, c_f;
    int16_t field_predA[2];
    int16_t field_predB[2];
    int16_t field_predC[2];
    int a_valid, b_valid, c_valid;
    int hybridmv_thresh, y_bias = 0;

    if (v->mv_mode == MV_PMODE_MIXED_MV ||
        ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
        mixedmv_pic = 1;
    else
        mixedmv_pic = 0;
    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy   = s->block_index[n];

    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
        if (mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
            s->current_picture.f.motion_val[1][xy + wrap][0]                     = 0;
            s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
        }
        return;
    }

    C = s->current_picture.f.motion_val[dir][xy -    1 + v->blocks_off];
    A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
    if (mv1) {
        if (v->field_mode && mixedmv_pic)
            off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        else
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    } else {
        //in 4-MV mode different blocks have different B predictor position
        switch (n) {
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];

    a_valid = !s->first_slice_line || (n == 2 || n == 3);
    b_valid = a_valid && (s->mb_width > 1);
    c_valid = s->mb_x || (n == 1 || n == 3);
    if (v->field_mode) {
        a_valid = a_valid && !is_intra[xy - wrap];
        b_valid = b_valid && !is_intra[xy - wrap + off];
        c_valid = c_valid && !is_intra[xy - 1];
    }

    if (a_valid) {
        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
        num_oppfield  += a_f;
        num_samefield += 1 - a_f;
        field_predA[0] = A[0];
        field_predA[1] = A[1];
    } else {
        field_predA[0] = field_predA[1] = 0;
        a_f = 0;
    }
    if (b_valid) {
        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
        num_oppfield  += b_f;
        num_samefield += 1 - b_f;
        field_predB[0] = B[0];
        field_predB[1] = B[1];
    } else {
        field_predB[0] = field_predB[1] = 0;
        b_f = 0;
    }
    if (c_valid) {
        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
        num_oppfield  += c_f;
        num_samefield += 1 - c_f;
        field_predC[0] = C[0];
        field_predC[1] = C[1];
    } else {
        field_predC[0] = field_predC[1] = 0;
        c_f = 0;
    }

    if (v->field_mode) {
        if (!v->numref)
            // REFFIELD determines if the last field or the second-last field is
            // to be used as reference
            opposite = 1 - v->reffield;
        else {
            if (num_samefield <= num_oppfield)
                opposite = 1 - pred_flag;
            else
                opposite = pred_flag;
        }
    } else
        opposite = 0;
    if (opposite) {
        if (a_valid && !a_f) {
            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
        }
        if (b_valid && !b_f) {
            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
        }
        if (c_valid && !c_f) {
            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 1;
        v->ref_field_type[dir] = !v->cur_field_type;
    } else {
        if (a_valid && a_f) {
            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
        }
        if (b_valid && b_f) {
            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
        }
        if (c_valid && c_f) {
            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 0;
        v->ref_field_type[dir] = v->cur_field_type;
    }

    if (a_valid) {
        px = field_predA[0];
        py = field_predA[1];
    } else if (c_valid) {
        px = field_predC[0];
        py = field_predC[1];
    } else if (b_valid) {
        px = field_predB[0];
        py = field_predB[1];
    } else {
        px = 0;
        py = 0;
    }

    if (num_samefield + num_oppfield > 1) {
        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
    }

    /* Pullback MV as specified in 8.3.5.3.4 */
    if (!v->field_mode) {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
        X  = (s->mb_width  << 6) - 4;
        Y  = (s->mb_height << 6) - 4;
        if (mv1) {
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
        } else {
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
        }
        if (qx + px > X) px = X - qx;
        if (qy + py > Y) py = Y - qy;
    }

    if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
        hybridmv_thresh = 32;
        if (a_valid && c_valid) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
            if (sum > hybridmv_thresh) {
                if (get_bits1(&s->gb)) {     // read HYBRIDPRED bit
                    px = field_predA[0];
                    py = field_predA[1];
                } else {
                    px = field_predC[0];
                    py = field_predC[1];
                }
            } else {
                if (is_intra[xy - 1])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                if (sum > hybridmv_thresh) {
                    if (get_bits1(&s->gb)) {
                        px = field_predA[0];
                        py = field_predA[1];
                    } else {
                        px = field_predC[0];
                        py = field_predC[1];
                    }
                }
            }
        }
    }

    if (v->field_mode && v->numref)
        r_y >>= 1;
    if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
        y_bias = 1;
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
    if (mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0]        = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1]        = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0]     = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1]     = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
        v->mv_f[dir][xy + 1 + v->blocks_off]    = v->mv_f[dir][xy + v->blocks_off];
        v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
    }
}
1601 /** Predict and set motion vector for interlaced frame picture MBs
1603 static inline void vc1_pred_mv_intfr(VC1Context
*v
, int n
, int dmv_x
, int dmv_y
,
1604 int mvn
, int r_x
, int r_y
, uint8_t* is_intra
)
1606 MpegEncContext
*s
= &v
->s
;
1607 int xy
, wrap
, off
= 0;
1608 int A
[2], B
[2], C
[2];
1610 int a_valid
= 0, b_valid
= 0, c_valid
= 0;
1611 int field_a
, field_b
, field_c
; // 0: same, 1: opposit
1612 int total_valid
, num_samefield
, num_oppfield
;
1613 int pos_c
, pos_b
, n_adj
;
1615 wrap
= s
->b8_stride
;
1616 xy
= s
->block_index
[n
];
1619 s
->mv
[0][n
][0] = s
->current_picture
.f
.motion_val
[0][xy
][0] = 0;
1620 s
->mv
[0][n
][1] = s
->current_picture
.f
.motion_val
[0][xy
][1] = 0;
1621 s
->current_picture
.f
.motion_val
[1][xy
][0] = 0;
1622 s
->current_picture
.f
.motion_val
[1][xy
][1] = 0;
1623 if (mvn
== 1) { /* duplicate motion data for 1-MV block */
1624 s
->current_picture
.f
.motion_val
[0][xy
+ 1][0] = 0;
1625 s
->current_picture
.f
.motion_val
[0][xy
+ 1][1] = 0;
1626 s
->current_picture
.f
.motion_val
[0][xy
+ wrap
][0] = 0;
1627 s
->current_picture
.f
.motion_val
[0][xy
+ wrap
][1] = 0;
1628 s
->current_picture
.f
.motion_val
[0][xy
+ wrap
+ 1][0] = 0;
1629 s
->current_picture
.f
.motion_val
[0][xy
+ wrap
+ 1][1] = 0;
1630 v
->luma_mv
[s
->mb_x
][0] = v
->luma_mv
[s
->mb_x
][1] = 0;
1631 s
->current_picture
.f
.motion_val
[1][xy
+ 1][0] = 0;
1632 s
->current_picture
.f
.motion_val
[1][xy
+ 1][1] = 0;
1633 s
->current_picture
.f
.motion_val
[1][xy
+ wrap
][0] = 0;
1634 s
->current_picture
.f
.motion_val
[1][xy
+ wrap
][1] = 0;
1635 s
->current_picture
.f
.motion_val
[1][xy
+ wrap
+ 1][0] = 0;
1636 s
->current_picture
.f
.motion_val
[1][xy
+ wrap
+ 1][1] = 0;
1641 off
= ((n
== 0) || (n
== 1)) ? 1 : -1;
1643 if (s
->mb_x
|| (n
== 1) || (n
== 3)) {
1644 if ((v
->blk_mv_type
[xy
]) // current block (MB) has a field MV
1645 || (!v
->blk_mv_type
[xy
] && !v
->blk_mv_type
[xy
- 1])) { // or both have frame MV
1646 A
[0] = s
->current_picture
.f
.motion_val
[0][xy
- 1][0];
1647 A
[1] = s
->current_picture
.f
.motion_val
[0][xy
- 1][1];
1649 } else { // current block has frame mv and cand. has field MV (so average)
1650 A
[0] = (s
->current_picture
.f
.motion_val
[0][xy
- 1][0]
1651 + s
->current_picture
.f
.motion_val
[0][xy
- 1 + off
* wrap
][0] + 1) >> 1;
1652 A
[1] = (s
->current_picture
.f
.motion_val
[0][xy
- 1][1]
1653 + s
->current_picture
.f
.motion_val
[0][xy
- 1 + off
* wrap
][1] + 1) >> 1;
1656 if (!(n
& 1) && v
->is_intra
[s
->mb_x
- 1]) {
1662 /* Predict B and C */
1663 B
[0] = B
[1] = C
[0] = C
[1] = 0;
1664 if (n
== 0 || n
== 1 || v
->blk_mv_type
[xy
]) {
1665 if (!s
->first_slice_line
) {
1666 if (!v
->is_intra
[s
->mb_x
- s
->mb_stride
]) {
1669 pos_b
= s
->block_index
[n_adj
] - 2 * wrap
;
1670 if (v
->blk_mv_type
[pos_b
] && v
->blk_mv_type
[xy
]) {
1671 n_adj
= (n
& 2) | (n
& 1);
1673 B
[0] = s
->current_picture
.f
.motion_val
[0][s
->block_index
[n_adj
] - 2 * wrap
][0];
1674 B
[1] = s
->current_picture
.f
.motion_val
[0][s
->block_index
[n_adj
] - 2 * wrap
][1];
1675 if (v
->blk_mv_type
[pos_b
] && !v
->blk_mv_type
[xy
]) {
1676 B
[0] = (B
[0] + s
->current_picture
.f
.motion_val
[0][s
->block_index
[n_adj
^ 2] - 2 * wrap
][0] + 1) >> 1;
1677 B
[1] = (B
[1] + s
->current_picture
.f
.motion_val
[0][s
->block_index
[n_adj
^ 2] - 2 * wrap
][1] + 1) >> 1;
1680 if (s
->mb_width
> 1) {
1681 if (!v
->is_intra
[s
->mb_x
- s
->mb_stride
+ 1]) {
1684 pos_c
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
                        C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                if (s->mb_x == s->mb_width - 1) {
                    if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                        pos_c = s->block_index[3] - 2 * wrap - 2;
                        if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                            C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
                        if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                            C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                            C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
        pos_b = s->block_index[1];
        B[0]  = s->current_picture.f.motion_val[0][pos_b][0];
        B[1]  = s->current_picture.f.motion_val[0][pos_b][1];
        pos_c = s->block_index[0];
        C[0]  = s->current_picture.f.motion_val[0][pos_c][0];
        C[1]  = s->current_picture.f.motion_val[0][pos_c][1];
    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    if (!v->blk_mv_type[xy]) {
        if (s->mb_width == 1) {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
        field_a = (A[1] & 4) ? 1 : 0;
        field_b = (B[1] & 4) ? 1 : 0;
        field_c = (C[1] & 4) ? 1 : 0;
        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                 * the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
                px =  field_a ? A[0] : B[0];
                py =  field_a ? A[1] : B[1];
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                } else if (!field_b && b_valid) {
                } else if (c_valid) {
                if (field_a && a_valid) {
                } else if (field_b && b_valid) {
                } else if (c_valid) {
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.f.motion_val[0][xy + 1       ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1       ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap    ][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap    ][1] = s->current_picture.f.motion_val[0][xy][1];
        s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
        s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
        s->mv[0][n + 1][0] = s->mv[0][n][0];
        s->mv[0][n + 1][1] = s->mv[0][n][1];
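/* Illustrative sketch (hypothetical helper, not used by the decoder): the
 * "signed modulus" store above maps the predictor-plus-differential sum back
 * into the legal MV range [-r, r-1] by masking with 2*r - 1 (r is a power of
 * two) and re-centring the result. */
static inline int wrap_mv_to_range_example(int mv_sum, int r)
{
    /* ((x + r) & (2*r - 1)) - r  wraps x into [-r, r-1] when r is a power of two */
    return ((mv_sum + r) & ((r << 1) - 1)) - r;
}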
/** Motion compensation for direct or interpolated blocks in B-frames
 */
static void vc1_interp_mc(VC1Context *v)
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    if (!v->field_mode && !v->s.next_picture.f.data[0])
    mx   = s->mv[1][0][0];
    my   = s->mv[1][0][1];
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[1])
            my   = my   - 2 + 4 * v->cur_field_type;
            uvmy = uvmy - 2 + 4 * v->cur_field_type;
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    srcY = s->next_picture.f.data[0];
    srcU = s->next_picture.f.data[1];
    srcV = s->next_picture.f.data[2];
    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);
    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    if (v->field_mode && v->ref_field_type[1]) {
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos    - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU, s->uvlinesize, 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            uint8_t *src, *src2;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
        srcY += s->mspel * (1 + s->linesize);
    if (v->field_mode && v->cur_field_type) {
        off    = s->current_picture_ptr->f.linesize[0];
        off_uv = s->current_picture_ptr->f.linesize[1];
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
        dxy = (my & 2) | ((mx & 2) >> 1);
            dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
            dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
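/* Illustrative sketch (hypothetical helper, not part of the decoder): how the
 * chroma vector is derived above. Luma MVs are in quarter-pel units and chroma
 * is at half resolution, so the luma MV is halved (the 3/4-pel phase rounded
 * up), and odd results are then pushed to an even phase when the faster chroma
 * rounding mode applies. */
static inline int derive_chroma_mv_example(int luma_mv)
{
    int uvmv = (luma_mv + ((luma_mv & 3) == 3)) >> 1;       /* halve, rounding the 3/4 phase up */
    uvmv = uvmv + ((uvmv < 0) ? -(uvmv & 1) : (uvmv & 1));  /* round odd values away from zero to even */
    return uvmv;
}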
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
#if B_FRACTION_DEN==256
        return 2 * ((value * n + 255) >> 9);
    return (value * n + 128) >> 8;
        n -= B_FRACTION_DEN;
        return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
    return (value * n + B_FRACTION_DEN / 2) / B_FRACTION_DEN;
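/* Illustrative sketch of what scale_mv() computes in the common
 * B_FRACTION_DEN == 256 case, as a hypothetical standalone helper assuming
 * quarter-pel sampling (qs != 0): a co-located MV from the next anchor frame
 * is scaled by the B-frame fraction for the forward part and by the remainder
 * (bfrac - 256, i.e. a negative factor) for the backward part. */
static inline int scale_mv_example(int value, int bfrac, int inv)
{
    int n = inv ? bfrac - 256 : bfrac;  /* forward: n/256, backward: (n-256)/256 */
    return (value * n + 128) >> 8;      /* rounded fixed-point scale */
}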
/** Reconstruct motion vector for B-frame and do motion compensation
 */
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
                            int direct, int mode)
        v->mv_mode2 = v->mv_mode;
        v->mv_mode  = MV_PMODE_INTENSITY_COMP;
            v->mv_mode = v->mv_mode2;
    if (mode == BMV_TYPE_INTERPOLATED) {
            v->mv_mode = v->mv_mode2;
        if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
            v->mv_mode = v->mv_mode2;
        vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
            v->mv_mode = v->mv_mode2;
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    const uint8_t *is_intra = v->mb_type[0];
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;
    wrap = s->b8_stride;
    xy   = s->block_index[0];
        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
        if (!v->field_mode) {
            s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
            /* Pullback predicted motion vectors as specified in 8.4.5.4 */
            s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
            s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
            s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
            s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C   = s->current_picture.f.motion_val[0][xy - 2];
        A   = s->current_picture.f.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
        } else if (s->mb_x) { // predictor C is not out of bounds
        /* Pullback MV as specified in 8.3.5.3.4 */
        if (v->profile < PROFILE_ADVANCED) {
            qx = (s->mb_x << 5);
            qy = (s->mb_y << 5);
            X  = (s->mb_width  << 5) - 4;
            Y  = (s->mb_height << 5) - 4;
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
            qx = (s->mb_x << 6);
            qy = (s->mb_y << 6);
            X  = (s->mb_width  << 6) - 4;
            Y  = (s->mb_height << 6) - 4;
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
                if (get_bits1(&s->gb)) {
            if (is_intra[xy - 2])
                sum = FFABS(px) + FFABS(py);
                sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (get_bits1(&s->gb)) {
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C   = s->current_picture.f.motion_val[1][xy - 2];
        A   = s->current_picture.f.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
        } else if (s->mb_x) { // predictor C is not out of bounds
        /* Pullback MV as specified in 8.3.5.3.4 */
        if (v->profile < PROFILE_ADVANCED) {
            qx = (s->mb_x << 5);
            qy = (s->mb_y << 5);
            X  = (s->mb_width  << 5) - 4;
            Y  = (s->mb_height << 5) - 4;
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
            qx = (s->mb_x << 6);
            qy = (s->mb_y << 6);
            X  = (s->mb_width  << 6) - 4;
            Y  = (s->mb_height << 6) - 4;
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
                if (get_bits1(&s->gb)) {
            if (is_intra[xy - 2])
                sum = FFABS(px) + FFABS(py);
                sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (get_bits1(&s->gb)) {
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
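/* Illustrative sketch (hypothetical helper): the pullback used in the function
 * above keeps a predicted MV inside the coded picture. With the macroblock
 * origin q, the far picture edge `limit` and the allowed left/top overshoot
 * `pad` (28 or 60 depending on profile), the predictor p is clamped so that
 * q + p stays within [-pad, limit]. */
static inline int pull_back_mv_example(int p, int q, int limit, int pad)
{
    if (q + p < -pad)
        p = -pad - q;
    if (q + p > limit)
        p = limit - q;
    return p;
}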
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        for (k = 0; k < 4; k++) {
            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (dir) { // backward
            vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
            if (n == 3 || mv1) {
                vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
            vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
            if (n == 3 || mv1) {
                vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
/** Get predicted DC value for I-frames only
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param overlap flag indicating that overlap filtering is used
 * @param pq integer part of picture quantizer
 * @param[in] n block index in the current MB
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 */
static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                                int16_t **dc_val_ptr, int *dir_ptr)
    int a, b, c, wrap, pred, scale;
    static const uint16_t dcpred[32] = {
        -1, 1024,  512,  341,  256,  205,  171,  146,  128,
           114,  102,   93,   85,   79,   73,   68,   64,
            60,   57,   54,   51,   49,   47,   45,   43,
            41,   39,   38,   37,   35,   34,   33
    /* find prediction - wmv3_dc_scale always used here in fact */
    if (n < 4) scale = s->y_dc_scale;
    else       scale = s->c_dc_scale;
    wrap   = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    if (pq < 9 || !overlap) {
        /* Set outer values */
        if (s->first_slice_line && (n != 2 && n != 3))
            b = a = dcpred[scale];
        if (s->mb_x == 0 && (n != 1 && n != 3))
            b = c = dcpred[scale];
        /* Set outer values */
        if (s->first_slice_line && (n != 2 && n != 3))
        if (s->mb_x == 0 && (n != 1 && n != 3))
    if (abs(a - b) <= abs(b - c)) {
        *dir_ptr = 1; // left
        *dir_ptr = 0; // top
    /* update predictor */
    *dc_val_ptr = &dc_val[0];
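/* Illustrative sketch of the direction rule used above (hypothetical helper):
 * a is the top neighbour's DC, b the top-left, c the left. If the gradient
 * across the top |a - b| is not larger than the gradient down the left side
 * |b - c|, the left neighbour is the better predictor, otherwise the top one. */
static inline int select_dc_predictor_example(int a, int b, int c, int *dir)
{
    if (abs(a - b) <= abs(b - c)) {
        *dir = 1;   /* predict from the left */
        return c;
    } else {
        *dir = 0;   /* predict from the top */
        return a;
    }
}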
/** Get predicted DC value
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param overlap flag indicating that overlap filtering is used
 * @param pq integer part of picture quantizer
 * @param[in] n block index in the current MB
 * @param a_avail flag indicating top block availability
 * @param c_avail flag indicating left block availability
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
    int a, b, c, wrap, pred;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    wrap   = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed */
    q1 = s->current_picture.f.qscale_table[mb_pos];
    dqscale_index = s->y_dc_scale_table[q1] - 1;
    if (dqscale_index < 0)
    if (c_avail && (n != 1 && n != 3)) {
        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    if (a_avail && (n != 2 && n != 3)) {
        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    if (a_avail && c_avail && (n != 3)) {
            off -= s->mb_stride;
        q2 = s->current_picture.f.qscale_table[off];
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    if (a_avail && c_avail) {
        if (abs(a - b) <= abs(b - c)) {
            *dir_ptr = 1; // left
            *dir_ptr = 0; // top
    } else if (a_avail) {
        *dir_ptr = 0; // top
    } else if (c_avail) {
        *dir_ptr = 1; // left
        *dir_ptr = 1; // left
    /* update predictor */
    *dc_val_ptr = &dc_val[0];
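/* Illustrative sketch: the rescaling above converts a neighbour's stored DC
 * (quantised with step scale_other) to the current block's step scale_cur
 * without a division. ff_vc1_dqscale[i] approximates (1 << 18) / (i + 1), so
 * multiplying and shifting right by 18 with a 0x20000 rounding bias is a
 * rounded fixed-point divide. Hypothetical standalone version. */
static inline int rescale_dc_example(int dc, int scale_other, int scale_cur)
{
    int dqscale = (1 << 18) / scale_cur;   /* ~ ff_vc1_dqscale[scale_cur - 1] */
    return (dc * scale_other * dqscale + 0x20000) >> 18;
}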
/** @} */ // Block group

/**
 * @name VC1 Macroblock-level functions in Simple/Main Profiles
 * @see 7.1.4, p91 and 8.1.1.7, p104
 */

static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
                                       uint8_t **coded_block_ptr)
    int xy, wrap, pred, a, b, c;
    xy   = s->block_index[n];
    wrap = s->b8_stride;
    a = s->coded_block[xy - 1       ];
    b = s->coded_block[xy - 1 - wrap];
    c = s->coded_block[xy     - wrap];
    *coded_block_ptr = &s->coded_block[xy];
/**
 * Decode one AC coefficient
 * @param v The VC1 context
 * @param last Last coefficient
 * @param skip How many zero coefficients to skip
 * @param value Decoded AC coefficient value
 * @param codingset set of VLC to decode data
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;
    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        run   = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        escape = decode210(gb);
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run   = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst   = index >= vc1_last_decode_table[codingset];
                level += vc1_last_delta_level_table[codingset][run];
                level += vc1_delta_level_table[codingset][run];
                run += vc1_last_delta_run_table[codingset][level] + 1;
                run += vc1_delta_run_table[codingset][level] + 1;
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            run   = get_bits(gb, v->s.esc3_run_length);
            sign  = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
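/* Illustrative sketch (hypothetical helper): how the (run, level, last)
 * triples produced by vc1_decode_ac_coeff() are typically consumed. Each
 * decoded value is placed after skipping `run` positions along a zigzag
 * scan order. */
static void place_ac_coeffs_example(int16_t block[64], const uint8_t *zz_table,
                                    const int *runs, const int *levels, int count)
{
    int i = 0, c;
    for (c = 0; c < count; c++) {
        i += runs[c];                       /* skip the zero run */
        if (i > 63)
            break;                          /* malformed input: stop at block end */
        block[zz_table[i++]] = levels[c];   /* store at the next scan position */
    }
}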
/** Decode intra block in intra frames - should be faster than decode_intra_block
 * @param v VC1Context
 * @param block block to decode
 * @param[in] n subblock index
 * @param coded are AC coeffs present or not
 * @param codingset set of VLC to decode data
 */
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                              int coded, int codingset)
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int16_t *ac_val, *ac_val2;
    /* Get DC differential */
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
    if (dcdiff == 119 /* ESC index value */) {
        /* TODO: Optimize */
        if (v->pq == 1)      dcdiff = get_bits(gb, 10);
        else if (v->pq == 2) dcdiff = get_bits(gb, 9);
        else                 dcdiff = get_bits(gb, 8);
            dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
        else if (v->pq == 2)
            dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    /* Store the quantized DC coeff, used for prediction */
        block[0] = dcdiff * s->y_dc_scale;
        block[0] = dcdiff * s->c_dc_scale;
        int last = 0, skip, value;
        const uint8_t *zz_table;
        scale = v->pq * 2 + v->halfpq;
                zz_table = v->zz_8x8[2];
                zz_table = v->zz_8x8[3];
            zz_table = v->zz_8x8[1];
        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        if (dc_pred_dir) // left
            ac_val -= 16 * s->block_wrap[n];
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            block[zz_table[i++]] = value;
        /* apply AC prediction if needed */
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++)
                    block[k << v->left_blk_sh] += ac_val[k];
                for (k = 1; k < 8; k++)
                    block[k << v->top_blk_sh] += ac_val[k + 8];
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k]     = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
                block[k] += (block[k] < 0) ? -v->pq : v->pq;
        if (s->ac_pred) i = 63;
        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            memcpy(ac_val2, ac_val, 8 * 2);
            ac_val -= 16 * s->block_wrap[n];
            memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        /* apply AC prediction if needed */
            if (dc_pred_dir) { //left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
    s->block_last_index[n] = i;
/** Decode intra block in intra frames - should be faster than decode_intra_block
 * @param v VC1Context
 * @param block block to decode
 * @param[in] n subblock number
 * @param coded are AC coeffs present or not
 * @param codingset set of VLC to decode data
 * @param mquant quantizer value for this macroblock
 */
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
                                  int coded, int codingset, int mquant)
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int16_t *ac_val, *ac_val2;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    /* Get DC differential */
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
    if (dcdiff == 119 /* ESC index value */) {
        /* TODO: Optimize */
        if (mquant == 1)      dcdiff = get_bits(gb, 10);
        else if (mquant == 2) dcdiff = get_bits(gb, 9);
        else                  dcdiff = get_bits(gb, 8);
            dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
        else if (mquant == 2)
            dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
    /* Store the quantized DC coeff, used for prediction */
        block[0] = dcdiff * s->y_dc_scale;
        block[0] = dcdiff * s->c_dc_scale;
    /* check if AC is needed at all */
    if (!a_avail && !c_avail)
    ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
    scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
    if (dc_pred_dir) // left
        ac_val -= 16 * s->block_wrap[n];
    q1 = s->current_picture.f.qscale_table[mb_pos];
    if ( dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
    if (!dc_pred_dir && n == 2)
        int last = 0, skip, value;
        const uint8_t *zz_table;
        if (!use_pred && v->fcm == ILACE_FRAME) {
            zz_table = v->zzi_8x8;
            if (!dc_pred_dir) // top
                zz_table = v->zz_8x8[2];
                zz_table = v->zz_8x8[3];
            if (v->fcm != ILACE_FRAME)
                zz_table = v->zz_8x8[1];
                zz_table = v->zzi_8x8;
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            block[zz_table[i++]] = value;
        /* apply AC prediction if needed */
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                if (dc_pred_dir) { //left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k]     = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
                block[k] += (block[k] < 0) ? -mquant : mquant;
        if (use_pred) i = 63;
    } else { // no AC coeffs
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            memcpy(ac_val2, ac_val, 8 * 2);
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                for (k = 1; k < 8; k++)
                    ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
            memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                for (k = 1; k < 8; k++)
                    ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
        /* apply AC prediction if needed */
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
    s->block_last_index[n] = i;
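/* Illustrative sketch (hypothetical helper): the AC prediction path above
 * rescales the neighbour's saved coefficients when that neighbour used a
 * different quantiser. With the doubled step sizes q1 (current) and q2
 * (neighbour), each predictor is multiplied by q2/q1 using the same 2^18
 * fixed-point reciprocal idea as the DC path. */
static inline int rescale_ac_pred_example(int ac_val, int q1, int q2)
{
    int inv_q1 = (1 << 18) / q1;                    /* ~ ff_vc1_dqscale[q1 - 1] */
    return (ac_val * q2 * inv_q1 + 0x20000) >> 18;  /* rounded ac_val * q2 / q1 */
}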
/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
 * @param v VC1Context
 * @param block block to decode
 * @param[in] n subblock index
 * @param coded are AC coeffs present or not
 * @param mquant block quantizer
 * @param codingset set of VLC to decode data
 */
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
                                  int coded, int mquant, int codingset)
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int16_t *ac_val, *ac_val2;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    s->dsp.clear_block(block);
    /* XXX: Guard against dumb values of mquant */
    mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[mquant];
    s->c_dc_scale = s->c_dc_scale_table[mquant];
    /* Get DC differential */
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
    if (dcdiff == 119 /* ESC index value */) {
        /* TODO: Optimize */
        if (mquant == 1)      dcdiff = get_bits(gb, 10);
        else if (mquant == 2) dcdiff = get_bits(gb, 9);
        else                  dcdiff = get_bits(gb, 8);
            dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
        else if (mquant == 2)
            dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
    /* Store the quantized DC coeff, used for prediction */
        block[0] = dcdiff * s->y_dc_scale;
        block[0] = dcdiff * s->c_dc_scale;
    /* check if AC is needed at all and adjust direction if needed */
    if (!a_avail) dc_pred_dir = 1;
    if (!c_avail) dc_pred_dir = 0;
    if (!a_avail && !c_avail) use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    scale = mquant * 2 + v->halfpq;
    if (dc_pred_dir) //left
        ac_val -= 16 * s->block_wrap[n];
    q1 = s->current_picture.f.qscale_table[mb_pos];
    if (dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
    if (!dc_pred_dir && n == 2)
    if (n == 3) q2 = q1;
        int last = 0, skip, value;
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            if (v->fcm == PROGRESSIVE)
                block[v->zz_8x8[0][i++]] = value;
                if (use_pred && (v->fcm == ILACE_FRAME)) {
                    if (!dc_pred_dir) // top
                        block[v->zz_8x8[2][i++]] = value;
                        block[v->zz_8x8[3][i++]] = value;
                    block[v->zzi_8x8[i++]] = value;
        /* apply AC prediction if needed */
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k]     = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
                block[k] += (block[k] < 0) ? -mquant : mquant;
        if (use_pred) i = 63;
    } else { // no AC coeffs
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            memcpy(ac_val2, ac_val, 8 * 2);
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                for (k = 1; k < 8; k++)
                    ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
            memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    return AVERROR_INVALIDDATA;
                for (k = 1; k < 8; k++)
                    ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
        /* apply AC prediction if needed */
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
    s->block_last_index[n] = i;
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                              int mquant, int ttmb, int first_block,
                              uint8_t *dst, int linesize, int skip_block,
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;
    s->dsp.clear_block(block);
        ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
    if (ttblk == TT_4X4) {
        subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    if ((ttblk != TT_8X8 && ttblk != TT_4X4)
        && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
            || (!v->res_rtm_flag && !first_block))) {
        subblkpat = decode012(gb);
            subblkpat ^= 3; // swap decoded pattern bits
        if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
        if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
    scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
    // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
    if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
        subblkpat = 2 - (ttblk == TT_8X4_TOP);
    if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
        subblkpat = 2 - (ttblk == TT_4X8_LEFT);
            vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                idx = v->zz_8x8[0][i++];
                idx = v->zzi_8x8[i++];
            block[idx] = value * scale;
                block[idx] += (block[idx] < 0) ? -mquant : mquant;
            v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
            v->vc1dsp.vc1_inv_trans_8x8(block);
            s->dsp.add_pixels_clamped(block, dst, linesize);
        pat = ~subblkpat & 0xF;
        for (j = 0; j < 4; j++) {
            last = subblkpat & (1 << (3 - j));
            off  = (j & 1) * 4 + (j & 2) * 16;
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                    idx = ff_vc1_simple_progressive_4x4_zz[i++];
                    idx = ff_vc1_adv_interlaced_4x4_zz[i++];
                block[idx + off] = value * scale;
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
                    v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
                    v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
        pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                    idx = v->zz_8x4[i++] + off;
                    idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
                block[idx] = value * scale;
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                    v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
                    v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
        pat = ~(subblkpat * 5) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                    idx = v->zz_4x8[i++] + off;
                    idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
                block[idx] = value * scale;
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                    v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
                    v->vc1dsp.vc1_inv_trans_4x8(dst + j * 4, linesize, block + off);
        *ttmb_out |= ttblk << (n * 4);
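/* Illustrative sketch (hypothetical helper): how the 4x4 sub-block pattern
 * above is turned into the coded-pattern nibble that vc1_decode_p_block()
 * reports to its caller. A set subblkpat bit means "no coefficients in this
 * 4x4 sub-block", so the reported pattern is simply its complement. */
static inline int coded_pattern_from_subblkpat_example(int subblkpat)
{
    return ~subblkpat & 0xF;   /* one bit per coded 4x4 sub-block */
}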
/** @} */ // Macroblock group

static const int size_table[6]   = { 0, 2, 3, 4,  5,  8 };
static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };

static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
    MpegEncContext *s  = &v->s;
    int mb_cbp         = v->cbp[s->mb_x - s->mb_stride],
        block_cbp      = mb_cbp >> (block_num * 4), bottom_cbp,
        mb_is_intra    = v->is_intra[s->mb_x - s->mb_stride],
        block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
    int idx, linesize  = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
    if (block_num > 3) {
        dst = s->dest[block_num - 3];
        dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
    if (s->mb_y != s->end_mb_y || block_num < 2) {
        if (block_num > 3) {
            bottom_cbp      = v->cbp[s->mb_x]      >> (block_num * 4);
            bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
            mv              = &v->luma_mv[s->mb_x - s->mb_stride];
            mv_stride       = s->mb_stride;
            bottom_cbp      = (block_num < 2) ? (mb_cbp               >> ((block_num + 2) * 4))
                                              : (v->cbp[s->mb_x]      >> ((block_num - 2) * 4));
            bottom_is_intra = (block_num < 2) ? (mb_is_intra          >> ((block_num + 2) * 4))
                                              : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
            mv_stride       = s->b8_stride;
            mv              = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
        if (bottom_is_intra & 1 || block_is_intra & 1 ||
            mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
            v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
            idx = ((bottom_cbp >> 2) | block_cbp) & 3;
                v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
                    v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
                    v->vc1dsp.vc1_v_loop_filter4(dst,     linesize, v->pq);
    dst -= 4 * linesize;
    ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
    if (ttblk == TT_4X4 || ttblk == TT_8X4) {
        idx = (block_cbp | (block_cbp >> 2)) & 3;
            v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
                v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
                v->vc1dsp.vc1_v_loop_filter4(dst,     linesize, v->pq);

static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
    MpegEncContext *s  = &v->s;
    int mb_cbp         = v->cbp[s->mb_x - 1 - s->mb_stride],
        block_cbp      = mb_cbp >> (block_num * 4), right_cbp,
        mb_is_intra    = v->is_intra[s->mb_x - 1 - s->mb_stride],
        block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
    int idx, linesize  = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
    if (block_num > 3) {
        dst = s->dest[block_num - 3] - 8 * linesize;
        dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
    if (s->mb_x != s->mb_width || !(block_num & 5)) {
        if (block_num > 3) {
            right_cbp      = v->cbp[s->mb_x - s->mb_stride]      >> (block_num * 4);
            right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
            mv             = &v->luma_mv[s->mb_x - s->mb_stride - 1];
            right_cbp      = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride]      >> ((block_num - 1) * 4))
                                             : (mb_cbp                              >> ((block_num + 1) * 4));
            right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
                                             : (mb_is_intra                         >> ((block_num + 1) * 4));
            mv             = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
        if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
            v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
            idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
                v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
                    v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
                    v->vc1dsp.vc1_h_loop_filter4(dst,                linesize, v->pq);
    ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
    if (ttblk == TT_4X4 || ttblk == TT_4X8) {
        idx = (block_cbp | (block_cbp >> 1)) & 5;
            v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
                v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
                v->vc1dsp.vc1_h_loop_filter4(dst,                linesize, v->pq);

static void vc1_apply_p_loop_filter(VC1Context *v)
    MpegEncContext *s = &v->s;
    for (i = 0; i < 6; i++) {
        vc1_apply_p_v_loop_filter(v, i);
    /* V always precedes H, therefore we run H one MB before V;
     * at the end of a row, we catch up to complete the row */
        for (i = 0; i < 6; i++) {
            vc1_apply_p_h_loop_filter(v, i);
        if (s->mb_x == s->mb_width - 1) {
            ff_update_block_index(s);
            for (i = 0; i < 6; i++) {
                vc1_apply_p_h_loop_filter(v, i);
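/* Illustrative sketch (hypothetical driver loop, not the decoder's own): the
 * in-loop filter ordering implemented above. Vertical edges of a macroblock
 * can only be filtered once the row below it is reconstructed, and horizontal
 * filtering additionally needs the right neighbour, so H trails V by one
 * macroblock and catches up at the end of each row. */
static void loop_filter_row_order_example(int mb_width,
                                          void (*filter_v)(int mb_x),
                                          void (*filter_h)(int mb_x))
{
    int mb_x;
    for (mb_x = 0; mb_x < mb_width; mb_x++) {
        filter_v(mb_x);             /* V filter at the current position */
        if (mb_x > 0)
            filter_h(mb_x - 1);     /* H filter trails by one MB */
    }
    filter_h(mb_width - 1);         /* catch up on the last MB of the row */
}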
/** Decode one P-frame MB
 */
static int vc1_decode_p_mb(VC1Context *v)
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mb_has_coeffs = 1; /* last_flag */
    int dmv_x, dmv_y; /* Differential MV components */
    int index, index1; /* LUT indexes */
    int val, sign; /* temp values */
    int first_block = 1;
    int skipped, fourmv;
    int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
    mquant = v->pq; /* lossy initialization */
    if (v->mv_type_is_raw)
        fourmv = get_bits1(gb);
        fourmv = v->mv_type_mb_plane[mb_pos];
        skipped = get_bits1(gb);
        skipped = v->s.mbskip_table[mb_pos];
    if (!fourmv) { /* 1MV mode */
            GET_MVDATA(dmv_x, dmv_y);
                s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
                s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
            s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
            vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
            /* FIXME Set DC val for inter block ? */
            if (s->mb_intra && !mb_has_coeffs) {
                s->ac_pred = get_bits1(gb);
            } else if (mb_has_coeffs) {
                    s->ac_pred = get_bits1(gb);
                cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
                                VC1_TTMB_VLC_BITS, 2);
            if (!s->mb_intra) vc1_mc_1mv(v, 0);
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                val = ((cbp >> (5 - i)) & 1);
                off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                v->mb_type[0][s->block_index[i]] = s->mb_intra;
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if (i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if (i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];
                    vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                           (i & 4) ? v->codingset2 : v->codingset);
                    if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                    v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                        for (j = 0; j < 64; j++)
                            s->block[i][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                    if (v->pq >= 9 && v->overlap) {
                            v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                            v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                    block_cbp   |= 0xF << (i << 2);
                    block_intra |= 1 << i;
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
                                             s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            s->current_picture.f.mb_type[mb_pos]      = MB_TYPE_SKIP;
            s->current_picture.f.qscale_table[mb_pos] = 0;
            vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
    } else { // 4MV mode
        if (!skipped /* unskipped MB */) {
            int intra_count = 0, coded_inter = 0;
            int is_intra[6], is_coded[6];
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                val = ((cbp >> (5 - i)) & 1);
                s->dc_val[0][s->block_index[i]] = 0;
                    GET_MVDATA(dmv_x, dmv_y);
                    vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
                        vc1_mc_4mv_luma(v, i, 0);
                    intra_count += s->mb_intra;
                    is_intra[i]  = s->mb_intra;
                    is_coded[i]  = mb_has_coeffs;
                    is_intra[i] = (intra_count >= 3);
                        vc1_mc_4mv_chroma(v, 0);
                v->mb_type[0][s->block_index[i]] = is_intra[i];
                    coded_inter = !is_intra[i] & is_coded[i];
            // if there are no coded blocks then don't do anything more
            if (!intra_count && !coded_inter)
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            /* test if block is intra and has pred */
                for (i = 0; i < 6; i++)
                    if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
                        || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
                s->ac_pred = get_bits1(gb);
            if (!v->ttmbf && coded_inter)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                s->mb_intra = is_intra[i];
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if (i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if (i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];
                    vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
                                           (i & 4) ? v->codingset2 : v->codingset);
                    if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                    v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                        for (j = 0; j < 64; j++)
                            s->block[i][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
                                                     (i & 4) ? s->uvlinesize : s->linesize);
                    if (v->pq >= 9 && v->overlap) {
                            v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                            v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                    block_cbp   |= 0xF << (i << 2);
                    block_intra |= 1 << i;
                } else if (is_coded[i]) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : s->linesize,
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY),
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
        } else { // skipped MB
            s->current_picture.f.qscale_table[mb_pos] = 0;
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            for (i = 0; i < 4; i++) {
                vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
                vc1_mc_4mv_luma(v, i, 0);
            vc1_mc_4mv_chroma(v, 0);
            s->current_picture.f.qscale_table[mb_pos] = 0;
    v->cbp[s->mb_x]      = block_cbp;
    v->ttblk[s->mb_x]    = block_tt;
    v->is_intra[s->mb_x] = block_intra;
3679 /* Decode one macroblock in an interlaced frame p picture */
3681 static int vc1_decode_p_mb_intfr(VC1Context
*v
)
3683 MpegEncContext
*s
= &v
->s
;
3684 GetBitContext
*gb
= &s
->gb
;
3686 int mb_pos
= s
->mb_x
+ s
->mb_y
* s
->mb_stride
;
3687 int cbp
= 0; /* cbp decoding stuff */
3688 int mqdiff
, mquant
; /* MB quantization */
3689 int ttmb
= v
->ttfrm
; /* MB Transform type */
3691 int mb_has_coeffs
= 1; /* last_flag */
3692 int dmv_x
, dmv_y
; /* Differential MV components */
3693 int val
; /* temp value */
3694 int first_block
= 1;
3696 int skipped
, fourmv
= 0, twomv
= 0;
3697 int block_cbp
= 0, pat
, block_tt
= 0;
3698 int idx_mbmode
= 0, mvbp
;
3699 int stride_y
, fieldtx
;
3701 mquant
= v
->pq
; /* Loosy initialization */
3704 skipped
= get_bits1(gb
);
3706 skipped
= v
->s
.mbskip_table
[mb_pos
];
3708 if (v
->fourmvswitch
)
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
        else
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
        switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
        /* store the motion vector type in a flag (useful later) */
        case MV_PMODE_INTFR_4MV:
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        case MV_PMODE_INTFR_4MV_FIELD:
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_2MV_FIELD:
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_1MV:
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        }
        if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
            s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
            s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->mb_intra = v->is_intra[s->mb_x] = 1;
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 1;
            fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
            mb_has_coeffs = get_bits1(gb);
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
            GET_MQUANT();
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same (not sure if necessary here) */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                v->mb_type[0][s->block_index[i]] = s->mb_intra;
                v->a_avail = v->c_avail = 0;
                if (i == 2 || i == 3 || !s->first_slice_line)
                    v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                if (i == 1 || i == 3 || s->mb_x)
                    v->c_avail = v->mb_type[0][s->block_index[i] - 1];

                vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                       (i & 4) ? v->codingset2 : v->codingset);
                if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                if (i < 4) {
                    stride_y = s->linesize << fieldtx;
                    off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
                } else {
                    stride_y = s->uvlinesize;
                }
                s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
            }
        } else { // inter MB
            mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
                v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
            } else if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
                       || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
                v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
            }
            s->mb_intra = v->is_intra[s->mb_x] = 0;
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 0;
            fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
            /* for all motion vectors read MVDATA and motion compensate each block */
            if (fourmv) {
                mvbp = v->fourmvbp;
                for (i = 0; i < 6; i++) {
                    if (i < 4) {
                        dmv_x = dmv_y = 0;
                        val = ((mvbp >> (3 - i)) & 1);
                        if (val)
                            get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                        vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
                        vc1_mc_4mv_luma(v, i, 0);
                    } else if (i == 4) {
                        vc1_mc_4mv_chroma4(v);
                    }
                }
            } else if (twomv) {
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
                vc1_mc_4mv_luma(v, 0, 0);
                vc1_mc_4mv_luma(v, 1, 0);
                dmv_x = dmv_y = 0;
                if (mvbp & 1)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
                vc1_mc_4mv_luma(v, 2, 0);
                vc1_mc_4mv_luma(v, 3, 0);
                vc1_mc_4mv_chroma4(v);
            } else {
                mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
                dmv_x = dmv_y = 0;
                if (mvbp)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
                vc1_mc_1mv(v, 0);
            }
            if (cbp)
                GET_MQUANT(); // p. 227
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && cbp)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (!fieldtx)
                    off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                else
                    off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
                if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        }
    } else { // skipped MB
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        for (i = 0; i < 6; i++) {
            v->mb_type[0][s->block_index[i]] = 0;
            s->dc_val[0][s->block_index[i]] = 0;
        }
        s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
        s->current_picture.f.qscale_table[mb_pos] = 0;
        v->blk_mv_type[s->block_index[0]] = 0;
        v->blk_mv_type[s->block_index[1]] = 0;
        v->blk_mv_type[s->block_index[2]] = 0;
        v->blk_mv_type[s->block_index[3]] = 0;
        vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
        vc1_mc_1mv(v, 0);
    }
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    return 0;
}
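/** Decode one P-frame MB (in interlaced field picture) */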
static int vc1_decode_p_mb_intfi(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mb_has_coeffs = 1; /* last_flag */
    int dmv_x, dmv_y; /* Differential MV components */
    int val; /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int pred_flag = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0;

    mquant = v->pq; /* lossy initialization */

    idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
    if (idx_mbmode <= 1) { // intra MB
        s->mb_intra = v->is_intra[s->mb_x] = 1;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
        GET_MQUANT();
        s->current_picture.f.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
        mb_has_coeffs = idx_mbmode & 1;
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            v->mb_type[0][s->block_index[i]] = 1;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];

            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
            // TODO: loop filter
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
        for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
        if (idx_mbmode <= 5) { // 1-MV
            dmv_x = dmv_y = pred_flag = 0;
            if (idx_mbmode & 1) {
                get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
            }
            vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
            vc1_mc_1mv(v, 0);
            mb_has_coeffs = !(idx_mbmode & 2);
        } else { // 4-MV
            v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
            for (i = 0; i < 6; i++) {
                if (i < 4) {
                    dmv_x = dmv_y = pred_flag = 0;
                    val = ((v->fourmvbp >> (3 - i)) & 1);
                    if (val)
                        get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
                    vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
                    vc1_mc_4mv_luma(v, i, 0);
                } else if (i == 4)
                    vc1_mc_4mv_chroma(v, 0);
            }
            mb_has_coeffs = idx_mbmode & 1;
        }
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->current_picture.f.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf && cbp) {
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
            if (v->cur_field_type)
                off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
            if (val) {
                pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                         first_block, s->dest[dst_idx] + off,
                                         (i & 4) ? s->uvlinesize : s->linesize,
                                         (i & 4) && (s->flags & CODEC_FLAG_GRAY),
                                         &block_tt);
                block_cbp |= pat << (i << 2);
                if (!v->ttmbf && ttmb < 8) ttmb = -1;
                first_block = 0;
            }
        }
    }
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    return 0;
}
/** Decode one B-frame MB (in Main profile)
 */
static void vc1_decode_b_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mb_has_coeffs = 0; /* last_flag */
    int index, index1; /* LUT indexes */
    int val, sign; /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct;
    int dmv_x[2], dmv_y[2];
    int bmvtype = BMV_TYPE_BACKWARD;

    mquant = v->pq; /* lossy initialization */

    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
    for (i = 0; i < 6; i++) {
        v->mb_type[0][s->block_index[i]] = 0;
        s->dc_val[0][s->block_index[i]] = 0;
    }
    s->current_picture.f.qscale_table[mb_pos] = 0;

    if (!direct) {
        if (!skipped) {
            GET_MVDATA(dmv_x[0], dmv_y[0]);
            dmv_x[1] = dmv_x[0];
            dmv_y[1] = dmv_y[0];
        }
        if (skipped || !s->mb_intra) {
            bmvtype = decode012(gb);
            switch (bmvtype) {
            case 0:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                break;
            case 1:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                break;
            case 2:
                bmvtype = BMV_TYPE_INTERPOLATED;
                dmv_x[0] = dmv_y[0] = 0;
            }
        }
    }
    for (i = 0; i < 6; i++)
        v->mb_type[0][s->block_index[i]] = s->mb_intra;

    if (skipped) {
        if (direct)
            bmvtype = BMV_TYPE_INTERPOLATED;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
        return;
    }
    if (direct) {
        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->current_picture.f.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf)
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
    } else {
        if (!mb_has_coeffs && !s->mb_intra) {
            /* no coded blocks - effectively skipped */
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            return;
        }
        if (s->mb_intra && !mb_has_coeffs) {
            GET_MQUANT();
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            s->ac_pred = get_bits1(gb);
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        } else {
            if (bmvtype == BMV_TYPE_INTERPOLATED) {
                GET_MVDATA(dmv_x[0], dmv_y[0]);
                if (!mb_has_coeffs) {
                    /* interpolated skipped block */
                    vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                    vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                    return;
                }
            }
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            if (!s->mb_intra)
                vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            if (s->mb_intra)
                s->ac_pred = get_bits1(gb);
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            GET_MQUANT();
            s->current_picture.f.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
    }
    dst_idx = 0;
    for (i = 0; i < 6; i++) {
        s->dc_val[0][s->block_index[i]] = 0;
        dst_idx += i >> 2;
        val = ((cbp >> (5 - i)) & 1);
        off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
        v->mb_type[0][s->block_index[i]] = s->mb_intra;
        if (s->mb_intra) {
            /* check if prediction blocks A and C are available */
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];

            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            if (v->rangeredfrm)
                for (j = 0; j < 64; j++)
                    s->block[i][j] <<= 1;
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
        } else if (val) {
            vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                               first_block, s->dest[dst_idx] + off,
                               (i & 4) ? s->uvlinesize : s->linesize,
                               (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
            if (!v->ttmbf && ttmb < 8)
                ttmb = -1;
            first_block = 0;
        }
    }
}
/** Decode one B-frame MB (in interlaced field B picture)
 */
static void vc1_decode_b_mb_intfi(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mb_has_coeffs = 0; /* last_flag */
    int val; /* temp value */
    int first_block = 1;
    int dst_idx, off;
    int fwd;
    int dmv_x[2], dmv_y[2], pred_flag[2];
    int bmvtype = BMV_TYPE_BACKWARD;
    int idx_mbmode, interpmvp;

    mquant = v->pq; /* lossy initialization */

    idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
    if (idx_mbmode <= 1) { // intra MB
        s->mb_intra = v->is_intra[s->mb_x] = 1;
        s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
        s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
        GET_MQUANT();
        s->current_picture.f.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
        mb_has_coeffs = idx_mbmode & 1;
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            v->mb_type[0][s->block_index[i]] = s->mb_intra;
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];

            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            if (v->rangeredfrm)
                for (j = 0; j < 64; j++)
                    s->block[i][j] <<= 1;
            off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
            // TODO: yet to perform loop filter
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
        for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
        if (v->fmb_is_raw)
            fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
        else
            fwd = v->forward_mb_plane[mb_pos];
        if (idx_mbmode <= 5) { // 1-MV
            dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
            pred_flag[0] = pred_flag[1] = 0;
            if (fwd)
                bmvtype = BMV_TYPE_FORWARD;
            else {
                bmvtype = decode012(gb);
                switch (bmvtype) {
                case 0:
                    bmvtype = BMV_TYPE_BACKWARD;
                    break;
                case 1:
                    bmvtype = BMV_TYPE_DIRECT;
                    break;
                case 2:
                    bmvtype = BMV_TYPE_INTERPOLATED;
                    interpmvp = get_bits1(gb);
                }
            }
            v->bmvtype = bmvtype;
            if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
                get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
            }
            if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
                get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
            }
            if (bmvtype == BMV_TYPE_DIRECT) {
                dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
                dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
            }
            vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
            vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
            mb_has_coeffs = !(idx_mbmode & 2);
        } else { // 4-MV
            if (fwd)
                bmvtype = BMV_TYPE_FORWARD;
            v->bmvtype = bmvtype;
            v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
            for (i = 0; i < 6; i++) {
                if (i < 4) {
                    dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
                    dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
                    val = ((v->fourmvbp >> (3 - i)) & 1);
                    if (val)
                        get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
                                              &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
                                              &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
                    vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
                    vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
                } else if (i == 4)
                    vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
            }
            mb_has_coeffs = idx_mbmode & 1;
        }
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->current_picture.f.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf && cbp) {
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
            if (v->cur_field_type)
                off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
            if (val) {
                vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                   first_block, s->dest[dst_idx] + off,
                                   (i & 4) ? s->uvlinesize : s->linesize,
                                   (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
                if (!v->ttmbf && ttmb < 8)
                    ttmb = -1;
                first_block = 0;
            }
        }
    }
}
/** Decode blocks of I-frame
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    s->mb_x = s->mb_y = 0;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            uint8_t *dst[6];
            ff_update_block_index(s);
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.f.qscale_table[mb_pos] = v->pq;
            s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);

                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                } else {
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                }
            }

            if (v->pq >= 9 && v->overlap) {
                if (s->mb_x) {
                    v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if (!s->first_slice_line) {
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);

            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);

        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);

    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
}
/** Decode blocks of I-frame for advanced profile
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    int k;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;
    int mqdiff;
    GetBitContext *gb = &s->gb;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    // do frame decode
    s->mb_x = s->mb_y = 0;
    s->first_slice_line = 1;
    s->mb_y = s->start_mb_y;
    if (s->start_mb_y) {
        s->mb_x = 0;
        ff_init_block_index(s);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    }
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            int16_t (*block)[64] = v->block[v->cur_blk_idx];
            ff_update_block_index(s);
            s->dsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;

            // do actual MB decoding and displaying
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            if (v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);

            GET_MQUANT();

            s->current_picture.f.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);

                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);

                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);
            }

            vc1_smooth_overlap_filter_iblk(v);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);

            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }

    /* raw bottom MB row */
    s->mb_x = 0;
    ff_init_block_index(s);
    for (;s->mb_x < s->mb_width; s->mb_x++) {
        ff_update_block_index(s);
        vc1_put_signed_blocks_clamped(v);
        if (v->s.loop_filter)
            vc1_loop_filter_iblk_delayed(v, v->pq);
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
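/** Decode blocks of P-frame */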
static void vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else vc1_decode_p_mb(v);
            if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
                vc1_apply_p_loop_filter(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
        }
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (apply_loop_filter) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_apply_p_loop_filter(v);
        }
    }
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
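/** Decode blocks of B-frame */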
static void vc1_decode_b_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            if (v->fcm == ILACE_FIELD)
                vc1_decode_b_mb_intfi(v);
            else
                vc1_decode_b_mb(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
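/** Reconstruct a skipped P-frame by copying the previous picture into the current one */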
static void vc1_decode_skip_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        ff_init_block_index(s);
        ff_update_block_index(s);
        memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
        memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
        ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    s->pict_type = AV_PICTURE_TYPE_P;
}
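/** Entry point for block decoding: dispatches to the I/P/B decoders above
 *  according to picture type, profile and whether the frame is skipped */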
void ff_vc1_decode_blocks(VC1Context *v)
{

    v->s.esc3_level_length = 0;
    if (v->x8_type) {
        ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
    } else {
        v->cur_blk_idx     =  0;
        v->left_blk_idx    = -1;
        v->topleft_blk_idx =  1;
        v->top_blk_idx     =  2;
        switch (v->s.pict_type) {
        case AV_PICTURE_TYPE_I:
            if (v->profile == PROFILE_ADVANCED)
                vc1_decode_i_blocks_adv(v);
            else
                vc1_decode_i_blocks(v);
            break;
        case AV_PICTURE_TYPE_P:
            if (v->p_frame_skipped)
                vc1_decode_skip_blocks(v);
            else
                vc1_decode_p_blocks(v);
            break;
        case AV_PICTURE_TYPE_B:
            if (v->bi_type) {
                if (v->profile == PROFILE_ADVANCED)
                    vc1_decode_i_blocks_adv(v);
                else
                    vc1_decode_i_blocks(v);
            } else
                vc1_decode_b_blocks(v);
            break;
        }
    }
}
#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER

typedef struct {
    /**
     * Transform coefficients for both sprites in 16.16 fixed point format,
     * in the order they appear in the bitstream:
     * x scale
     * rotation 1 (unused)
     * x offset
     * rotation 2 (unused)
     * y scale
     * y offset
     * alpha
     */
    int coefs[2][7];

    int effect_type, effect_flag;
    int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
    int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
} SpriteData;
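/* Note on the helper below: a 30-bit unsigned bitstream field is re-centred on
 * zero (subtract 1 << 29) and doubled, yielding the signed 16.16 fixed-point
 * coefficients described in SpriteData above. */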
static inline int get_fp_val(GetBitContext* gb)
{
    return (get_bits_long(gb, 30) - (1 << 29)) << 1;
}

static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
{
    c[1] = c[3] = 0;

    switch (get_bits(gb, 2)) {
    case 0:
        c[0] = 1 << 16;
        c[2] = get_fp_val(gb);
        c[4] = 1 << 16;
        break;
    case 1:
        c[0] = c[4] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        break;
    case 2:
        c[0] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        break;
    case 3:
        c[0] = get_fp_val(gb);
        c[1] = get_fp_val(gb);
        c[2] = get_fp_val(gb);
        c[3] = get_fp_val(gb);
        c[4] = get_fp_val(gb);
        break;
    }
    c[5] = get_fp_val(gb);
    if (get_bits1(gb))
        c[6] = get_fp_val(gb);
    else
        c[6] = 1 << 16;
}
static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
{
    AVCodecContext *avctx = v->s.avctx;
    int sprite, i;

    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
            av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
        for (i = 0; i < 7; i++)
            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
                   sd->coefs[sprite][i] / (1<<16),
                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
        av_log(avctx, AV_LOG_DEBUG, "\n");
    }

    if (sd->effect_type = get_bits_long(gb, 30)) {
        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
        case 7:
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            break;
        case 14:
            vc1_sprite_parse_transform(gb, sd->effect_params1);
            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
            break;
        default:
            for (i = 0; i < sd->effect_pcount1; i++)
                sd->effect_params1[i] = get_fp_val(gb);
        }
        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
            // effect 13 is simple alpha blending and matches the opacity above
            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
            for (i = 0; i < sd->effect_pcount1; i++)
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params1[i] / (1 << 16),
                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
            av_log(avctx, AV_LOG_DEBUG, "\n");
        }

        sd->effect_pcount2 = get_bits(gb, 16);
        if (sd->effect_pcount2 > 10) {
            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
            return;
        } else if (sd->effect_pcount2) {
            i = -1;
            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
            while (++i < sd->effect_pcount2) {
                sd->effect_params2[i] = get_fp_val(gb);
                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
                       sd->effect_params2[i] / (1 << 16),
                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
            }
            av_log(avctx, AV_LOG_DEBUG, "\n");
        }
    }
    if (sd->effect_flag = get_bits1(gb))
        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");

    if (get_bits_count(gb) >= gb->size_in_bits +
        (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
    if (get_bits_count(gb) < gb->size_in_bits - 8)
        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
}
static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
{
    int i, plane, row, sprite;
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t* src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    int ysub[2];
    MpegEncContext *s = &v->s;

    for (i = 0; i < 2; i++) {
        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
        xadv[i] = sd->coefs[i][0];
        if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);

        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
    }
    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);

    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width>>!!plane;

        for (row = 0; row < v->output_height>>!!plane; row++) {
            uint8_t *dst = v->sprite_output_frame.data[plane] +
                           v->sprite_output_frame.linesize[plane] * row;

            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                uint8_t *iplane = s->current_picture.f.data[plane];
                int iline = s->current_picture.f.linesize[plane];
                int ycoord = yoff[sprite] + yadv[sprite] * row;
                int yline = ycoord >> 16;
                int next_line;
                ysub[sprite] = ycoord & 0xFFFF;
                if (sprite) {
                    iplane = s->last_picture.f.data[plane];
                    iline  = s->last_picture.f.linesize[plane];
                }
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                    if (ysub[sprite])
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                } else {
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                        } else {
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
                            sr_cache[sprite][0] = yline;
                        }
                    }
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    }
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
                }
            }

            if (!v->two_sprites) {
                if (ysub[0]) {
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                } else {
                    memcpy(dst, src_h[0][0], width);
                }
            } else {
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1<<16)-1-alpha, width);
                } else {
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
                }
            }
        }

        if (!plane) {
            for (i = 0; i < 2; i++) {
                xoff[i] >>= 1;
                yoff[i] >>= 1;
            }
        }
    }
}
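/** Parse the sprite data for the current frame and render it into
 *  sprite_output_frame (WMV3IMAGE/VC1IMAGE only) */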
static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
{
    MpegEncContext *s = &v->s;
    AVCodecContext *avctx = s->avctx;
    SpriteData sd;

    vc1_parse_sprites(v, gb, &sd);

    if (!s->current_picture.f.data[0]) {
        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
        return -1;
    }

    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
        v->two_sprites = 0;
    }

    if (v->sprite_output_frame.data[0])
        avctx->release_buffer(avctx, &v->sprite_output_frame);

    v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
    v->sprite_output_frame.reference    = 0;
    if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    vc1_draw_sprites(v, &sd);

    return 0;
}
static void vc1_sprite_flush(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *f = &s->current_picture.f;
    int plane, i;

    /* Windows Media Image codecs have a convergence interval of two keyframes.
       Since we can't enforce it, clear to black the missing sprite. This is
       wrong but it looks better than doing nothing. */

    if (f->data[0])
        for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
            for (i = 0; i < v->sprite_height>>!!plane; i++)
                memset(f->data[plane] + i * f->linesize[plane],
                       plane ? 128 : 0, f->linesize[plane]);
}

#endif
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int i;

    /* Allocate mb bitplanes */
    v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
    v->direct_mb_plane  = av_malloc (s->mb_stride * s->mb_height);
    v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
    v->fieldtx_plane    = av_mallocz(s->mb_stride * s->mb_height);
    v->acpred_plane     = av_malloc (s->mb_stride * s->mb_height);
    v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);

    v->n_allocated_blks = s->mb_width + 2;
    v->block            = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
    v->cbp_base         = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
    v->cbp              = v->cbp_base + s->mb_stride;
    v->ttblk_base       = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
    v->ttblk            = v->ttblk_base + s->mb_stride;
    v->is_intra_base    = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
    v->is_intra         = v->is_intra_base + s->mb_stride;
    v->luma_mv_base     = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
    v->luma_mv          = v->luma_mv_base + s->mb_stride;

    /* allocate block type info in that way so it could be used with s->block_index[] */
    v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mb_type[0]   = v->mb_type_base + s->b8_stride + 1;
    v->mb_type[1]   = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
    v->mb_type[2]   = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);

    /* allocate memory to store block level MV info */
    v->blk_mv_type_base = av_mallocz(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->blk_mv_type      = v->blk_mv_type_base + s->b8_stride + 1;
    v->mv_f_base        = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f[0]          = v->mv_f_base + s->b8_stride + 1;
    v->mv_f[1]          = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mv_f_last_base   = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f_last[0]     = v->mv_f_last_base + s->b8_stride + 1;
    v->mv_f_last[1]     = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
    v->mv_f_next_base   = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
    v->mv_f_next[0]     = v->mv_f_next_base + s->b8_stride + 1;
    v->mv_f_next[1]     = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);

    /* Init coded blocks info */
    if (v->profile == PROFILE_ADVANCED) {
//        if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
//        if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
//            return -1;
    }

    ff_intrax8_common_init(&v->x8,s);

    if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        for (i = 0; i < 4; i++)
            if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
    }

    if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
        !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
        !v->mb_type_base)
        return -1;

    return 0;
}
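/* The scantable init below builds column-first versions of the WMV1 zigzag
 * tables: transpose(x) swaps the row and column of a 6-bit scan index
 * ((x >> 3) | ((x & 7) << 3)). */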
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
{
    int i;
    for (i = 0; i < 64; i++) {
#define transpose(x) ((x >> 3) | ((x & 7) << 3))
        v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
        v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
        v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
        v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
        v->zzi_8x8[i]   = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
    }
}
/** Initialize a VC1/WMV3 decoder
 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 * @todo TODO: Decipher remaining bits in extra_data
 */
static av_cold int vc1_decode_init(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    GetBitContext gb;

    /* save the container output size for WMImage */
    v->output_width  = avctx->width;
    v->output_height = avctx->height;

    if (!avctx->extradata_size || !avctx->extradata)
        return -1;
    if (!(avctx->flags & CODEC_FLAG_GRAY))
        avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    else
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);

    avctx->flags |= CODEC_FLAG_EMU_EDGE;
    v->s.flags   |= CODEC_FLAG_EMU_EDGE;

    if (ff_vc1_init_common(v) < 0)
        return -1;
    ff_h264chroma_init(&v->h264chroma, 8);
    ff_vc1dsp_init(&v->vc1dsp);

    if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
        int count = 0;

        // looks like WMV3 has a sequence header stored in the extradata
        // advanced sequence header may be before the first frame
        // the last byte of the extradata is a version number, 1 for the
        // samples we can decode

        init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);

        if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
            return -1;

        count = avctx->extradata_size*8 - get_bits_count(&gb);
        if (count > 0) {
            av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
                   count, get_bits(&gb, count));
        } else if (count < 0) {
            av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
        }
    } else { // VC1/WVC1/WVP2
        const uint8_t *start = avctx->extradata;
        uint8_t *end = avctx->extradata + avctx->extradata_size;
        const uint8_t *next;
        int size, buf2_size;
        uint8_t *buf2 = NULL;
        int seq_initialized = 0, ep_initialized = 0;

        if (avctx->extradata_size < 16) {
            av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
            return -1;
        }

        buf2  = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
        next  = start;
        for (; next < end; start = next) {
            next = find_next_marker(start + 4, end);
            size = next - start - 4;
            if (size <= 0)
                continue;
            buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
            init_get_bits(&gb, buf2, buf2_size * 8);
            switch (AV_RB32(start)) {
            case VC1_CODE_SEQHDR:
                if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
                    av_free(buf2);
                    return -1;
                }
                seq_initialized = 1;
                break;
            case VC1_CODE_ENTRYPOINT:
                if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
                    av_free(buf2);
                    return -1;
                }
                ep_initialized = 1;
                break;
            }
        }
        av_free(buf2);
        if (!seq_initialized || !ep_initialized) {
            av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
            return -1;
        }
        v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
    }

    avctx->profile = v->profile;
    if (v->profile == PROFILE_ADVANCED)
        avctx->level = v->level;

    avctx->has_b_frames = !!avctx->max_b_frames;

    s->mb_width  = (avctx->coded_width  + 15) >> 4;
    s->mb_height = (avctx->coded_height + 15) >> 4;

    if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
        ff_vc1_init_transposed_scantables(v);
    } else {
        memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
    }

    if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        v->sprite_width  = avctx->coded_width;
        v->sprite_height = avctx->coded_height;

        avctx->coded_width  = avctx->width  = v->output_width;
        avctx->coded_height = avctx->height = v->output_height;

        // prevent 16.16 overflows
        if (v->sprite_width  > 1 << 14 ||
            v->sprite_height > 1 << 14 ||
            v->output_width  > 1 << 14 ||
            v->output_height > 1 << 14) return -1;
    }
    return 0;
}
/** Close a VC1/WMV3 decoder
 * @warning Initial try at using MpegEncContext stuff
 */
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    int i;

    if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
        && v->sprite_output_frame.data[0])
        avctx->release_buffer(avctx, &v->sprite_output_frame);
    for (i = 0; i < 4; i++)
        av_freep(&v->sr_rows[i >> 1][i & 1]);
    av_freep(&v->hrd_rate);
    av_freep(&v->hrd_buffer);
    ff_MPV_common_end(&v->s);
    av_freep(&v->mv_type_mb_plane);
    av_freep(&v->direct_mb_plane);
    av_freep(&v->forward_mb_plane);
    av_freep(&v->fieldtx_plane);
    av_freep(&v->acpred_plane);
    av_freep(&v->over_flags_plane);
    av_freep(&v->mb_type_base);
    av_freep(&v->blk_mv_type_base);
    av_freep(&v->mv_f_base);
    av_freep(&v->mv_f_last_base);
    av_freep(&v->mv_f_next_base);
    av_freep(&v->block);
    av_freep(&v->cbp_base);
    av_freep(&v->ttblk_base);
    av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
    av_freep(&v->luma_mv_base);
    ff_intrax8_common_end(&v->x8);
    return 0;
}
/** Decode a VC1/WMV3 frame
 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 */
static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size, n_slices = 0, i;
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *pict = data;
    uint8_t *buf2 = NULL;
    const uint8_t *buf_start = buf;
    int mb_height, n_slices1;
    struct {
        uint8_t *buf;
        GetBitContext gb;
        int mby_start;
    } *slices = NULL, *tmp;

    /* no supplementary picture */
    if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
        /* special case for last picture */
        if (s->low_delay == 0 && s->next_picture_ptr) {
            *pict = s->next_picture_ptr->f;
            s->next_picture_ptr = NULL;
            *got_frame = 1;
        }
        return 0;
    }

    if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
        if (v->profile < PROFILE_ADVANCED)
            avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
        else
            avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
    }

    //for advanced profile we may need to parse and unescape data
    if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        int buf_size2 = 0;
        buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

        if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
            const uint8_t *start, *end, *next;
            int size;

            next = buf;
            for (start = buf, end = buf + buf_size; next < end; start = next) {
                next = find_next_marker(start + 4, end);
                size = next - start - 4;
                if (size <= 0) continue;
                switch (AV_RB32(start)) {
                case VC1_CODE_FRAME:
                    if (avctx->hwaccel ||
                        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
                        buf_start = start;
                    buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                    break;
                case VC1_CODE_FIELD: {
                    int buf_size3;
                    tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                    if (!tmp)
                        goto err;
                    slices = tmp;
                    slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!slices[n_slices].buf)
                        goto err;
                    buf_size3 = vc1_unescape_buffer(start + 4, size,
                                                    slices[n_slices].buf);
                    init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                                  buf_size3 << 3);
                    /* assuming that the field marker is at the exact middle,
                       hope it's correct */
                    slices[n_slices].mby_start = s->mb_height >> 1;
                    n_slices1 = n_slices - 1; // index of the last slice of the first field
                    n_slices++;
                    break;
                }
                case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
                    buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                    init_get_bits(&s->gb, buf2, buf_size2 * 8);
                    ff_vc1_decode_entry_point(avctx, v, &s->gb);
                    break;
                case VC1_CODE_SLICE: {
                    int buf_size3;
                    tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                    if (!tmp)
                        goto err;
                    slices = tmp;
                    slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (!slices[n_slices].buf)
                        goto err;
                    buf_size3 = vc1_unescape_buffer(start + 4, size,
                                                    slices[n_slices].buf);
                    init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                                  buf_size3 << 3);
                    slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
                    n_slices++;
                    break;
                }
                }
            }
        } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
            const uint8_t *divider;
            int buf_size3;

            divider = find_next_marker(buf, buf + buf_size);
            if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
                av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
                goto err;
            } else { // found field marker, unescape second field
                tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                if (!tmp)
                    goto err;
                slices = tmp;
                slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!slices[n_slices].buf)
                    goto err;
                buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
                init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                              buf_size3 << 3);
                slices[n_slices].mby_start = s->mb_height >> 1;
                n_slices1 = n_slices - 1;
                n_slices++;
            }
            buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
        } else {
            buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
        }
        init_get_bits(&s->gb, buf2, buf_size2*8);
    } else
        init_get_bits(&s->gb, buf, buf_size*8);

    if (v->res_sprite) {
        v->new_sprite  = !get_bits1(&s->gb);
        v->two_sprites =  get_bits1(&s->gb);
        /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
           we're using the sprite compositor. These are intentionally kept separate
           so you can get the raw sprites by using the wmv3 decoder for WMVP or
           the vc1 one for WVP2 */
        if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
            if (v->new_sprite) {
                // switch AVCodecContext parameters to those of the sprites
                avctx->width  = avctx->coded_width  = v->sprite_width;
                avctx->height = avctx->coded_height = v->sprite_height;
            }
        }
    }

    if (s->context_initialized &&
        (s->width  != avctx->coded_width ||
         s->height != avctx->coded_height)) {
        ff_vc1_decode_end(avctx);
    }

    if (!s->context_initialized) {
        if (ff_msmpeg4_decode_init(avctx) < 0 || ff_vc1_decode_init_alloc_tables(v) < 0)
            goto err;

        s->low_delay = !avctx->has_b_frames || v->res_sprite;

        if (v->profile == PROFILE_ADVANCED) {
            s->h_edge_pos = avctx->coded_width;
            s->v_edge_pos = avctx->coded_height;
        }
    }

    /* We need to set current_picture_ptr before reading the header,
     * otherwise we cannot store anything in there. */
    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
        int i = ff_find_unused_picture(s, 0);
        if (i < 0)
            goto err;
        s->current_picture_ptr = &s->picture[i];
    }

    // do parse frame header
    v->pic_header_flag = 0;
    if (v->profile < PROFILE_ADVANCED) {
        if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
            goto err;
        }
    } else {
        if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
            goto err;
        }
    }

    if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
        && s->pict_type != AV_PICTURE_TYPE_I) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
        goto err;
    }

    // process pulldown flags
    s->current_picture_ptr->f.repeat_pict = 0;
    // Pulldown flags are only valid when 'broadcast' has been set.
    // So ticks_per_frame will be 2
    if (v->rff) {
        // repeat field
        s->current_picture_ptr->f.repeat_pict = 1;
    } else if (v->rptfrm) {
        // repeat frames
        s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
    }

    // for skipping the frame
    s->current_picture.f.pict_type = s->pict_type;
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    /* skip B-frames if we don't have reference frames */
    if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
        goto err;
    }
    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
         avctx->skip_frame >= AVDISCARD_ALL) {
        goto end;
    }

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            goto end;
        else
            s->next_p_frame_damaged = 0;
    }

    if (ff_MPV_frame_start(s, avctx) < 0) {
        goto err;
    }

    s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
    s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;

    if ((CONFIG_VC1_VDPAU_DECODER)
        &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
        ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
    else if (avctx->hwaccel) {
        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
            goto err;
        if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
            goto err;
        if (avctx->hwaccel->end_frame(avctx) < 0)
            goto err;
    } else {
        ff_mpeg_er_frame_start(s);

        v->bits = buf_size * 8;
        v->end_mb_x = s->mb_width;
        if (v->field_mode) {
            uint8_t *tmp[2];
            s->current_picture.f.linesize[0] <<= 1;
            s->current_picture.f.linesize[1] <<= 1;
            s->current_picture.f.linesize[2] <<= 1;
            s->linesize   <<= 1;
            s->uvlinesize <<= 1;
            tmp[0] = v->mv_f_last[0];
            tmp[1] = v->mv_f_last[1];
            v->mv_f_last[0] = v->mv_f_next[0];
            v->mv_f_last[1] = v->mv_f_next[1];
            v->mv_f_next[0] = v->mv_f[0];
            v->mv_f_next[1] = v->mv_f[1];
            v->mv_f[0] = tmp[0];
            v->mv_f[1] = tmp[1];
        }
        mb_height = s->mb_height >> v->field_mode;
        for (i = 0; i <= n_slices; i++) {
            if (i > 0 && slices[i - 1].mby_start >= mb_height) {
                if (v->field_mode <= 0) {
                    av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
                           "picture boundary (%d >= %d)\n", i,
                           slices[i - 1].mby_start, mb_height);
                    continue;
                }
                v->second_field = 1;
                v->blocks_off   = s->mb_width  * s->mb_height << 1;
                v->mb_off       = s->mb_stride * s->mb_height >> 1;
            } else {
                v->second_field = 0;
                v->blocks_off   = 0;
                v->mb_off       = 0;
            }
            if (i) {
                v->pic_header_flag = 0;
                if (v->field_mode && i == n_slices1 + 2) {
                    if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
                        av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
                        continue;
                    }
                } else if (get_bits1(&s->gb)) {
                    v->pic_header_flag = 1;
                    if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
                        av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
                        continue;
                    }
                }
            }
            s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
            if (!v->field_mode || v->second_field)
                s->end_mb_y = (i == n_slices     ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
            else
                s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
            ff_vc1_decode_blocks(v);
            if (i != n_slices)
                s->gb = slices[i].gb;
        }
        if (v->field_mode) {
            v->second_field = 0;
            if (s->pict_type == AV_PICTURE_TYPE_B) {
                memcpy(v->mv_f_base, v->mv_f_next_base,
                       2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
            }
            s->current_picture.f.linesize[0] >>= 1;
            s->current_picture.f.linesize[1] >>= 1;
            s->current_picture.f.linesize[2] >>= 1;
            s->linesize   >>= 1;
            s->uvlinesize >>= 1;
        }
        av_dlog(s->avctx, "Consumed %i/%i bits\n",
                get_bits_count(&s->gb), s->gb.size_in_bits);
//  if (get_bits_count(&s->gb) > buf_size * 8)
//      return -1;
        ff_er_frame_end(&s->er);
    }

    ff_MPV_frame_end(s);

    if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
        avctx->width  = avctx->coded_width  = v->output_width;
        avctx->height = avctx->coded_height = v->output_height;
        if (avctx->skip_frame >= AVDISCARD_NONREF)
            goto end;
#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
        if (vc1_decode_sprites(v, &s->gb))
            goto err;
#endif
        *pict      = v->sprite_output_frame;
        *got_frame = 1;
    } else {
        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
            *pict = s->current_picture_ptr->f;
        } else if (s->last_picture_ptr != NULL) {
            *pict = s->last_picture_ptr->f;
        }
        if (s->last_picture_ptr || s->low_delay) {
            *got_frame = 1;
            ff_print_debug_info(s, pict);
        }
    }

end:
    av_free(buf2);
    for (i = 0; i < n_slices; i++)
        av_free(slices[i].buf);
    av_free(slices);
    return buf_size;

err:
    av_free(buf2);
    for (i = 0; i < n_slices; i++)
        av_free(slices[i].buf);
    av_free(slices);
    return -1;
}
static const AVProfile profiles[] = {
    { FF_PROFILE_VC1_SIMPLE,   "Simple"   },
    { FF_PROFILE_VC1_MAIN,     "Main"     },
    { FF_PROFILE_VC1_COMPLEX,  "Complex"  },
    { FF_PROFILE_VC1_ADVANCED, "Advanced" },
    { FF_PROFILE_UNKNOWN },
};
static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
#if CONFIG_DXVA2
    AV_PIX_FMT_DXVA2_VLD,
#endif
#if CONFIG_VAAPI
    AV_PIX_FMT_VAAPI_VLD,
#endif
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
AVCodec ff_vc1_decoder = {
    .name           = "vc1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#if CONFIG_WMV3_DECODER
AVCodec ff_wmv3_decoder = {
    .name           = "wmv3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3_VDPAU_DECODER
AVCodec ff_wmv3_vdpau_decoder = {
    .name           = "wmv3_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_VC1_VDPAU_DECODER
AVCodec ff_vc1_vdpau_decoder = {
    .name           = "vc1_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3IMAGE_DECODER
AVCodec ff_wmv3image_decoder = {
    .name           = "wmv3image",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
    .pix_fmts       = ff_pixfmt_list_420
};
#endif
#if CONFIG_VC1IMAGE_DECODER
AVCodec ff_vc1image_decoder = {
    .name           = "vc1image",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
    .pix_fmts       = ff_pixfmt_list_420
};
#endif