/*
 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license and patent
 * grant that can be found in the LICENSE file in the root of the source
 * tree. All contributing project authors may be found in the AUTHORS
 * file in the root of the source tree.
 */
#include "treereader.h"
#include "entropymv.h"
#include "entropymode.h"
#include "onyxd_int.h"
#include "findnearmv.h"
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
{
    const vp8_prob *const p = (const vp8_prob *) mvc;
    int x = 0;

    if (vp8_read(r, p[mvpis_short]))  /* Large */
    {
        int i = 0;

        do
        {
            x += vp8_read(r, p[MVPbits + i]) << i;
        }
        while (++i < 3);

        i = mvlong_width - 1;  /* Skip bit 3, which is sometimes implicit */

        do
        {
            x += vp8_read(r, p[MVPbits + i]) << i;
        }
        while (--i > 3);

        if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3]))
            x += 8;
    }
    else    /* small */
        x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);

    if (x && vp8_read(r, p[MVPsign]))
        x = -x;

    return x;
}
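/* Illustrative note, assuming the short tree covers the magnitudes below 8:
 * "large" magnitudes are assembled bit by bit -- bits 0..2 first, then bits
 * mvlong_width-1 down to 4, and finally bit 3. When every bit above bit 3 is
 * zero (the !(x & 0xFFF0) test), bit 3 must be 1, since a smaller magnitude
 * would have been coded on the short tree instead; it is therefore implicit,
 * and x += 8 happens without consuming a bit from the stream.
 */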
static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)
{
    mv->row = (short)(read_mvcomponent(r, mvc) << 1);
    mv->col = (short)(read_mvcomponent(r, ++mvc) << 1);
}
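/* Illustrative note: the row and col components use consecutive contexts,
 * hence the ++mvc for the second call. Each decoded component is doubled so
 * that the stored MV is in the 1/8th pel units the rest of the decoder
 * compares against (see the edge-distance comment in vp8_decode_mode_mvs
 * below); the components themselves are effectively quarter-pel steps.
 */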
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc)
{
    int i = 0;

    do
    {
        const vp8_prob *up = vp8_mv_update_probs[i].prob;
        vp8_prob *p = (vp8_prob *)(mvc + i);
        vp8_prob *const pstop = p + MVPcount;

        do
        {
            if (vp8_read(bc, *up++))
            {
                const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);

                *p = x ? x << 1 : 1;
            }
        }
        while (++p < pstop);
    }
    while (++i < 2);
}
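/* Illustrative note: there is one MV_CONTEXT per component (row, then col),
 * each holding MVPcount probabilities. For every probability a single bool
 * says whether an update follows; the update is a 7-bit literal scaled into
 * the 8-bit probability range, with 0 mapped to 1, presumably because a zero
 * probability is not representable by the bool coder.
 */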
static MB_PREDICTION_MODE read_mv_ref(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_mv_ref_tree, p);

    return (MB_PREDICTION_MODE)i;
}
static MB_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p)
{
    const int i = vp8_treed_read(bc, vp8_sub_mv_ref_tree, p);

    return (MB_PREDICTION_MODE)i;
}
unsigned int vp8_mv_cont_count[5][4] =
{
    { 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    { 0, 0, 0, 0 },
    { 0, 0, 0, 0 }
};
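/* Illustrative note: the columns tally the sub-MV reference modes chosen per
 * vp8_mv_cont() context, matching the increments below: [0] = LEFT4X4,
 * [1] = ABOVE4X4, [2] = ZERO4X4, [3] = NEW4X4 (VPX_MODE_COUNT builds only).
 */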
void vp8_decode_mode_mvs(VP8D_COMP *pbi)
{
    const MV Zero = { 0, 0 };

    VP8_COMMON *const pc = &pbi->common;
    vp8_reader *const bc = &pbi->bc;

    MODE_INFO *mi = pc->mi, *ms;
    const int mis = pc->mode_info_stride;

    MV_CONTEXT *const mvc = pc->fc.mvc;

    int mb_row = -1;

    vp8_prob prob_intra;
    vp8_prob prob_last;
    vp8_prob prob_gf;
    vp8_prob prob_skip_false = 0;

    if (pc->mb_no_coeff_skip)
        prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);

    prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
    prob_last = (vp8_prob)vp8_read_literal(bc, 8);
    prob_gf = (vp8_prob)vp8_read_literal(bc, 8);
    if (vp8_read_bit(bc))
    {
        int i = 0;

        do
        {
            pc->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
        }
        while (++i < 4);
    }

    if (vp8_read_bit(bc))
    {
        int i = 0;

        do
        {
            pc->fc.uv_mode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
        }
        while (++i < 3);
    }

    read_mvcontexts(bc, mvc);
    while (++mb_row < pc->mb_rows)
    {
        int mb_col = -1;

        while (++mb_col < pc->mb_cols)
        {
            MB_MODE_INFO *const mbmi = &mi->mbmi;
            MV *const mv = &mbmi->mv.as_mv;
            VP8_COMMON *const pc = &pbi->common;
            MACROBLOCKD *xd = &pbi->mb;

            vp8dx_bool_decoder_fill(bc);
            // Distance of MB to the various image edges.
            // These are specified to 1/8th pel, as they are always compared
            // to MV values that are in 1/8th pel units.
            xd->mb_to_left_edge = -((mb_col * 16) << 3);
            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
            xd->mb_to_top_edge = -((mb_row * 16)) << 3;
            xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
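            /* Worked example (illustrative): at mb_col == 2 in a frame with
             * pc->mb_cols == 40, mb_to_left_edge = -((2 * 16) << 3) = -256 and
             * mb_to_right_edge = ((40 - 1 - 2) * 16) << 3 = 4736, i.e. the MB
             * sits 32 pels (256 / 8) from the left edge.
             */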
            // If required read in new segmentation data for this MB
            if (pbi->mb.update_mb_segmentation_map)
                vp8_read_mb_features(bc, mbmi, &pbi->mb);

            // Read the macroblock coeff skip flag if this feature is in use, else default to 0
            if (pc->mb_no_coeff_skip)
                mbmi->mb_skip_coeff = vp8_read(bc, prob_skip_false);
            else
                mbmi->mb_skip_coeff = 0;

            mbmi->uv_mode = DC_PRED;
            if ((mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, prob_intra)))    /* inter MB */
            {
                int rct[4];
                vp8_prob mv_ref_p[VP8_MVREFS - 1];
                MV nearest, nearby, best_mv;

                if (vp8_read(bc, prob_last))
                {
                    mbmi->ref_frame = (MV_REFERENCE_FRAME)((int)mbmi->ref_frame + (int)(1 + vp8_read(bc, prob_gf)));
                }
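                /* Illustrative note, assuming the usual MV_REFERENCE_FRAME
                 * ordering (INTRA_FRAME, LAST_FRAME, GOLDEN_FRAME,
                 * ALTREF_FRAME): reading 0 against prob_intra selects the
                 * intra path below; otherwise ref_frame starts at 1 (LAST)
                 * and, when the prob_last bool fires, becomes
                 * 1 + 1 + vp8_read(bc, prob_gf), i.e. 2 (GOLDEN) or 3 (ALTREF).
                 */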
                vp8_find_near_mvs(xd, mi, &nearest, &nearby, &best_mv, rct, mbmi->ref_frame, pbi->common.ref_frame_sign_bias);

                vp8_mv_ref_probs(mv_ref_p, rct);
                switch (mbmi->mode = read_mv_ref(bc, mv_ref_p))
                {
                case SPLITMV:
                {
                    const int s = mbmi->partitioning =
                                      vp8_treed_read(bc, vp8_mbsplit_tree, vp8_mbsplit_probs);
                    const int num_p = vp8_mbsplit_count[s];
                    const int *const L = vp8_mbsplits[s];
                    int j = 0;

                    do  /* for each subset j */
                    {
                        B_MODE_INFO *const bmi = mbmi->partition_bmi + j;
                        MV *const mv = &bmi->mv.as_mv;

                        int k = -1;  /* first block in subset j */
                        int mv_contz;

                        while (j != L[++k]);    /* advance k to the first block of subset j */
                        mv_contz = vp8_mv_cont(&(vp8_left_bmi(mi, k)->mv.as_mv), &(vp8_above_bmi(mi, k, mis)->mv.as_mv));

                        switch (bmi->mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2[mv_contz])) //pc->fc.sub_mv_ref_prob))
                        {
                        case NEW4X4:
                            read_mv(bc, mv, (const MV_CONTEXT *) mvc);
                            mv->row += best_mv.row;
                            mv->col += best_mv.col;
#ifdef VPX_MODE_COUNT
                            vp8_mv_cont_count[mv_contz][3]++;
#endif
                            break;
= vp8_left_bmi(mi
, k
)->mv
.as_mv
;
251 #ifdef VPX_MODE_COUNT
252 vp8_mv_cont_count
[mv_contz
][0]++;
                        case ABOVE4X4:
                            *mv = vp8_above_bmi(mi, k, mis)->mv.as_mv;
#ifdef VPX_MODE_COUNT
                            vp8_mv_cont_count[mv_contz][1]++;
#endif
                            break;
                        case ZERO4X4:
                            *mv = Zero;
#ifdef VPX_MODE_COUNT
                            vp8_mv_cont_count[mv_contz][2]++;
#endif
                            break;

                        default:
                            break;
                        }
                        /* Fill (uniform) modes, mvs of jth subset.
                           Must do it here because ensuing subsets can
                           refer back to us via "left" or "above". */
                        do
                            if (j == L[k])
                            {
                                mi->bmi[k].mode = bmi->mode;
                                mi->bmi[k].mv.as_mv = bmi->mv.as_mv;
                            }
                        while (++k < 16);
                    }
                    while (++j < num_p);

                    *mv = mi->bmi[15].mv.as_mv;   /* MB-level MV tracks the last block */
                }
                break;   /* done with SPLITMV */
                case NEARMV:
                    *mv = nearby;
                    // Clip "next_nearest" so that it does not extend too far out of the image
                    if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
                        mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
                    else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
                        mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;

                    if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
                        mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
                    else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
                        mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;

                    goto propagate_mv;
                case NEARESTMV:
                    *mv = nearest;
                    // Clip "next_nearest" so that it does not extend too far out of the image
                    if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
                        mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
                    else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
                        mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;

                    if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
                        mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
                    else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
                        mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;

                    goto propagate_mv;

                case ZEROMV:
                    *mv = Zero;
                    goto propagate_mv;

                case NEWMV:
                    read_mv(bc, mv, (const MV_CONTEXT *) mvc);
                    mv->row += best_mv.row;
                    mv->col += best_mv.col;

                    /* The encoder should not produce invalid motion vectors, but
                     * since arbitrary-length MVs can be parsed from the bitstream,
                     * we need to clamp them here, in case we're reading bad data,
                     * to avoid a crash.
                     */
#if CONFIG_DEBUG
                    assert(mv->col >= (xd->mb_to_left_edge - LEFT_TOP_MARGIN));
                    assert(mv->col <= (xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN));
                    assert(mv->row >= (xd->mb_to_top_edge - LEFT_TOP_MARGIN));
                    assert(mv->row <= (xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN));
#endif
                    if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
                        mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
                    else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
                        mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;

                    if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
                        mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
                    else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
                        mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
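                    /* Illustrative note: clamping keeps the motion-compensated
                     * block within a bounded distance of the image, so that
                     * reference-frame reads stay inside the frame's padded
                     * border; conforming streams are unaffected by the clamp.
                     */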
                propagate_mv:  /* same MV throughout */
                    {
                        /* manually unrolled form of:
                        //int i = 0;
                        //do
                        //{
                        //    mi->bmi[i].mv.as_mv = *mv;
                        //}
                        //while (++i < 16);
                        */

                        mi->bmi[0].mv.as_mv = *mv;
                        mi->bmi[1].mv.as_mv = *mv;
                        mi->bmi[2].mv.as_mv = *mv;
                        mi->bmi[3].mv.as_mv = *mv;
                        mi->bmi[4].mv.as_mv = *mv;
                        mi->bmi[5].mv.as_mv = *mv;
                        mi->bmi[6].mv.as_mv = *mv;
                        mi->bmi[7].mv.as_mv = *mv;
                        mi->bmi[8].mv.as_mv = *mv;
                        mi->bmi[9].mv.as_mv = *mv;
                        mi->bmi[10].mv.as_mv = *mv;
                        mi->bmi[11].mv.as_mv = *mv;
                        mi->bmi[12].mv.as_mv = *mv;
                        mi->bmi[13].mv.as_mv = *mv;
                        mi->bmi[14].mv.as_mv = *mv;
                        mi->bmi[15].mv.as_mv = *mv;
                    }
                    break;

                default:
                    break;
                }
            }
            else
            {
                /* MB is intra coded */
                int j = 0;

                do
                {
                    mi->bmi[j].mv.as_mv = Zero;
                }
                while (++j < 16);
                if ((mbmi->mode = (MB_PREDICTION_MODE) vp8_read_ymode(bc, pc->fc.ymode_prob)) == B_PRED)
                {
                    int j = 0;

                    do
                    {
                        mi->bmi[j].mode = (B_PREDICTION_MODE)vp8_read_bmode(bc, pc->fc.bmode_prob);
                    }
                    while (++j < 16);
                }

                mbmi->uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pc->fc.uv_mode_prob);
            }
            mi++;       // next macroblock
        }

        mi++;           // skip left predictor each row
    }
}