/*
 *  Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
12 #include "vpx_ports/config.h"
18 #include "entropymode.h"
19 #include "quant_common.h"
20 #include "segmentation.h"
21 #include "setupintrarecon.h"
22 #include "encodeintra.h"
23 #include "reconinter.h"
25 #include "pickinter.h"
26 #include "findnearmv.h"
27 #include "reconintra.h"
31 #include "vpx_ports/vpx_timer.h"
/*
 * Run-time CPU detection glue: when RTCD is enabled the macros resolve to the
 * per-instance function-pointer tables in cpi->common.rtcd; otherwise they
 * collapse to NULL so the statically selected implementations are used.
 * NOTE(review): the #else/#endif lines were lost in formatting and have been
 * restored here to keep the conditional balanced.
 */
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x)     &cpi->common.rtcd.x
#define IF_RTCD(x)  (x)
#else
#define RTCD(x)     NULL
#define IF_RTCD(x)  NULL
#endif
40 extern void vp8_stuff_mb(VP8_COMP
*cpi
, MACROBLOCKD
*x
, TOKENEXTRA
**t
) ;
42 extern void vp8cx_initialize_me_consts(VP8_COMP
*cpi
, int QIndex
);
43 extern void vp8_auto_select_speed(VP8_COMP
*cpi
);
44 extern void vp8cx_init_mbrthread_data(VP8_COMP
*cpi
,
49 void vp8_build_block_offsets(MACROBLOCK
*x
);
50 void vp8_setup_block_ptrs(MACROBLOCK
*x
);
51 int vp8cx_encode_inter_macroblock(VP8_COMP
*cpi
, MACROBLOCK
*x
, TOKENEXTRA
**t
, int recon_yoffset
, int recon_uvoffset
);
52 int vp8cx_encode_intra_macro_block(VP8_COMP
*cpi
, MACROBLOCK
*x
, TOKENEXTRA
**t
);
/*
 * Frame-level prediction-mode usage counters.  The y/uv/b arrays tally intra
 * (key-frame) mode choices; the inter_* arrays tally inter-frame choices.
 * They are updated in sum_intra_stats() and encode_mb_row().
 * NOTE(review): upstream guards the updates with #ifdef MODE_STATS; the
 * definitions themselves are unconditional here, matching this file.
 */
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5]  = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/*
 * Per-Q-index rounding factors in Q7 fixed point (value/128 of the quantizer
 * step).  Slightly larger rounding (56) is used for a handful of the lowest
 * quantizers; 48 everywhere else.
 * NOTE(review): the braces and the final (129th) element line were lost in
 * formatting; the trailing 48 is restored from upstream libvpx.
 */
static const int qrounding_factors[129] =
{
    56, 56, 56, 56, 48, 48, 56, 56,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48, 48, 48, 48, 48, 48, 48, 48,
    48,
};
/*
 * Per-Q-index zero-bin factors in Q7 fixed point.  The zero bin is the region
 * around zero quantized to 0; a smaller factor (72) is used for a few of the
 * lowest quantizers, 80 everywhere else.
 * NOTE(review): braces and the final (129th) element line were lost in
 * formatting; the trailing 80 is restored from upstream libvpx.
 */
static const int qzbin_factors[129] =
{
    72, 72, 72, 72, 80, 80, 72, 72,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80, 80, 80, 80, 80, 80, 80, 80,
    80,
};
104 //#define EXACT_QUANT
/*
 * Compute a fixed-point reciprocal of the quantizer step d so that division
 * can be performed as a multiply + shift:
 *     x / d  ~=  (x * (*quant + (1<<16))) >> 16 >> *shift
 * *shift receives floor(log2(d)); *quant receives the low 16 bits of the
 * rounded-up reciprocal 1 + (1 << (16 + shift)) / d.
 * NOTE(review): the local declarations and the initial t = d / t >>= 1 /
 * *shift store were lost in formatting and are restored from upstream libvpx.
 * Only used by the EXACT_QUANT variant of vp8cx_init_quantizer().
 */
static void vp8cx_invert_quant(short *quant, short *shift, short d)
{
    unsigned t;
    int l;

    t = d;

    // l = floor(log2(d))
    for (l = 0; t > 1; l++)
        t >>= 1;

    t = 1 + (1 << (16 + l)) / d;
    *quant = (short)(t - (1 << 16));
    *shift = l;
}
118 void vp8cx_init_quantizer(VP8_COMP
*cpi
)
125 int zbin_boost
[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
127 for (Q
= 0; Q
< QINDEX_RANGE
; Q
++)
130 quant_val
= vp8_dc_quant(Q
, cpi
->common
.y1dc_delta_q
);
131 vp8cx_invert_quant(cpi
->Y1quant
[Q
][0] + 0,
132 cpi
->Y1quant_shift
[Q
][0] + 0, quant_val
);
133 cpi
->Y1zbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
134 cpi
->Y1round
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
135 cpi
->common
.Y1dequant
[Q
][0][0] = quant_val
;
136 cpi
->zrun_zbin_boost_y1
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
138 quant_val
= vp8_dc2quant(Q
, cpi
->common
.y2dc_delta_q
);
139 vp8cx_invert_quant(cpi
->Y2quant
[Q
][0] + 0,
140 cpi
->Y2quant_shift
[Q
][0] + 0, quant_val
);
141 cpi
->Y2zbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
142 cpi
->Y2round
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
143 cpi
->common
.Y2dequant
[Q
][0][0] = quant_val
;
144 cpi
->zrun_zbin_boost_y2
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
146 quant_val
= vp8_dc_uv_quant(Q
, cpi
->common
.uvdc_delta_q
);
147 vp8cx_invert_quant(cpi
->UVquant
[Q
][0] + 0,
148 cpi
->UVquant_shift
[Q
][0] + 0, quant_val
);
149 cpi
->UVzbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;;
150 cpi
->UVround
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
151 cpi
->common
.UVdequant
[Q
][0][0] = quant_val
;
152 cpi
->zrun_zbin_boost_uv
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
154 // all the ac values = ;
155 for (i
= 1; i
< 16; i
++)
157 int rc
= vp8_default_zig_zag1d
[i
];
161 quant_val
= vp8_ac_yquant(Q
);
162 vp8cx_invert_quant(cpi
->Y1quant
[Q
][r
] + c
,
163 cpi
->Y1quant_shift
[Q
][r
] + c
, quant_val
);
164 cpi
->Y1zbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
165 cpi
->Y1round
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
166 cpi
->common
.Y1dequant
[Q
][r
][c
] = quant_val
;
167 cpi
->zrun_zbin_boost_y1
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
169 quant_val
= vp8_ac2quant(Q
, cpi
->common
.y2ac_delta_q
);
170 vp8cx_invert_quant(cpi
->Y2quant
[Q
][r
] + c
,
171 cpi
->Y2quant_shift
[Q
][r
] + c
, quant_val
);
172 cpi
->Y2zbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
173 cpi
->Y2round
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
174 cpi
->common
.Y2dequant
[Q
][r
][c
] = quant_val
;
175 cpi
->zrun_zbin_boost_y2
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
177 quant_val
= vp8_ac_uv_quant(Q
, cpi
->common
.uvac_delta_q
);
178 vp8cx_invert_quant(cpi
->UVquant
[Q
][r
] + c
,
179 cpi
->UVquant_shift
[Q
][r
] + c
, quant_val
);
180 cpi
->UVzbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
181 cpi
->UVround
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
182 cpi
->common
.UVdequant
[Q
][r
][c
] = quant_val
;
183 cpi
->zrun_zbin_boost_uv
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
188 void vp8cx_init_quantizer(VP8_COMP
*cpi
)
195 int zbin_boost
[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
197 for (Q
= 0; Q
< QINDEX_RANGE
; Q
++)
200 quant_val
= vp8_dc_quant(Q
, cpi
->common
.y1dc_delta_q
);
201 cpi
->Y1quant
[Q
][0][0] = (1 << 16) / quant_val
;
202 cpi
->Y1zbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
203 cpi
->Y1round
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
204 cpi
->common
.Y1dequant
[Q
][0][0] = quant_val
;
205 cpi
->zrun_zbin_boost_y1
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
207 quant_val
= vp8_dc2quant(Q
, cpi
->common
.y2dc_delta_q
);
208 cpi
->Y2quant
[Q
][0][0] = (1 << 16) / quant_val
;
209 cpi
->Y2zbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
210 cpi
->Y2round
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
211 cpi
->common
.Y2dequant
[Q
][0][0] = quant_val
;
212 cpi
->zrun_zbin_boost_y2
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
214 quant_val
= vp8_dc_uv_quant(Q
, cpi
->common
.uvdc_delta_q
);
215 cpi
->UVquant
[Q
][0][0] = (1 << 16) / quant_val
;
216 cpi
->UVzbin
[Q
][0][0] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;;
217 cpi
->UVround
[Q
][0][0] = (qrounding_factors
[Q
] * quant_val
) >> 7;
218 cpi
->common
.UVdequant
[Q
][0][0] = quant_val
;
219 cpi
->zrun_zbin_boost_uv
[Q
][0] = (quant_val
* zbin_boost
[0]) >> 7;
221 // all the ac values = ;
222 for (i
= 1; i
< 16; i
++)
224 int rc
= vp8_default_zig_zag1d
[i
];
228 quant_val
= vp8_ac_yquant(Q
);
229 cpi
->Y1quant
[Q
][r
][c
] = (1 << 16) / quant_val
;
230 cpi
->Y1zbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
231 cpi
->Y1round
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
232 cpi
->common
.Y1dequant
[Q
][r
][c
] = quant_val
;
233 cpi
->zrun_zbin_boost_y1
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
235 quant_val
= vp8_ac2quant(Q
, cpi
->common
.y2ac_delta_q
);
236 cpi
->Y2quant
[Q
][r
][c
] = (1 << 16) / quant_val
;
237 cpi
->Y2zbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
238 cpi
->Y2round
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
239 cpi
->common
.Y2dequant
[Q
][r
][c
] = quant_val
;
240 cpi
->zrun_zbin_boost_y2
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
242 quant_val
= vp8_ac_uv_quant(Q
, cpi
->common
.uvac_delta_q
);
243 cpi
->UVquant
[Q
][r
][c
] = (1 << 16) / quant_val
;
244 cpi
->UVzbin
[Q
][r
][c
] = ((qzbin_factors
[Q
] * quant_val
) + 64) >> 7;
245 cpi
->UVround
[Q
][r
][c
] = (qrounding_factors
[Q
] * quant_val
) >> 7;
246 cpi
->common
.UVdequant
[Q
][r
][c
] = quant_val
;
247 cpi
->zrun_zbin_boost_uv
[Q
][i
] = (quant_val
* zbin_boost
[i
]) >> 7;
252 void vp8cx_mb_init_quantizer(VP8_COMP
*cpi
, MACROBLOCK
*x
)
256 MACROBLOCKD
*xd
= &x
->e_mbd
;
259 // Select the baseline MB Q index.
260 if (xd
->segmentation_enabled
)
263 if (xd
->mb_segement_abs_delta
== SEGMENT_ABSDATA
)
265 QIndex
= xd
->segment_feature_data
[MB_LVL_ALT_Q
][xd
->mode_info_context
->mbmi
.segment_id
];
269 QIndex
= cpi
->common
.base_qindex
+ xd
->segment_feature_data
[MB_LVL_ALT_Q
][xd
->mode_info_context
->mbmi
.segment_id
];
270 QIndex
= (QIndex
>= 0) ? ((QIndex
<= MAXQ
) ? QIndex
: MAXQ
) : 0; // Clamp to valid range
274 QIndex
= cpi
->common
.base_qindex
;
277 zbin_extra
= (cpi
->common
.Y1dequant
[QIndex
][0][1] * (cpi
->zbin_over_quant
+ cpi
->zbin_mode_boost
)) >> 7;
279 for (i
= 0; i
< 16; i
++)
281 x
->block
[i
].quant
= cpi
->Y1quant
[QIndex
];
282 x
->block
[i
].quant_shift
= cpi
->Y1quant_shift
[QIndex
];
283 x
->block
[i
].zbin
= cpi
->Y1zbin
[QIndex
];
284 x
->block
[i
].round
= cpi
->Y1round
[QIndex
];
285 x
->e_mbd
.block
[i
].dequant
= cpi
->common
.Y1dequant
[QIndex
];
286 x
->block
[i
].zrun_zbin_boost
= cpi
->zrun_zbin_boost_y1
[QIndex
];
287 x
->block
[i
].zbin_extra
= (short)zbin_extra
;
291 zbin_extra
= (cpi
->common
.UVdequant
[QIndex
][0][1] * (cpi
->zbin_over_quant
+ cpi
->zbin_mode_boost
)) >> 7;
293 for (i
= 16; i
< 24; i
++)
295 x
->block
[i
].quant
= cpi
->UVquant
[QIndex
];
296 x
->block
[i
].quant_shift
= cpi
->UVquant_shift
[QIndex
];
297 x
->block
[i
].zbin
= cpi
->UVzbin
[QIndex
];
298 x
->block
[i
].round
= cpi
->UVround
[QIndex
];
299 x
->e_mbd
.block
[i
].dequant
= cpi
->common
.UVdequant
[QIndex
];
300 x
->block
[i
].zrun_zbin_boost
= cpi
->zrun_zbin_boost_uv
[QIndex
];
301 x
->block
[i
].zbin_extra
= (short)zbin_extra
;
305 zbin_extra
= (cpi
->common
.Y2dequant
[QIndex
][0][1] * ((cpi
->zbin_over_quant
/ 2) + cpi
->zbin_mode_boost
)) >> 7;
306 x
->block
[24].quant
= cpi
->Y2quant
[QIndex
];
307 x
->block
[24].quant_shift
= cpi
->Y2quant_shift
[QIndex
];
308 x
->block
[24].zbin
= cpi
->Y2zbin
[QIndex
];
309 x
->block
[24].round
= cpi
->Y2round
[QIndex
];
310 x
->e_mbd
.block
[24].dequant
= cpi
->common
.Y2dequant
[QIndex
];
311 x
->block
[24].zrun_zbin_boost
= cpi
->zrun_zbin_boost_y2
[QIndex
];
312 x
->block
[24].zbin_extra
= (short)zbin_extra
;
315 void vp8cx_frame_init_quantizer(VP8_COMP
*cpi
)
317 // vp8cx_init_quantizer() is first called in vp8_create_compressor(). A check is added here so that vp8cx_init_quantizer() is only called
318 // when these values are not all zero.
319 if (cpi
->common
.y1dc_delta_q
| cpi
->common
.y2dc_delta_q
| cpi
->common
.uvdc_delta_q
| cpi
->common
.y2ac_delta_q
| cpi
->common
.uvac_delta_q
)
321 vp8cx_init_quantizer(cpi
);
324 // MB level quantizer setup
325 vp8cx_mb_init_quantizer(cpi
, &cpi
->mb
);
331 void encode_mb_row(VP8_COMP
*cpi
,
341 int recon_yoffset
, recon_uvoffset
;
343 int ref_fb_idx
= cm
->lst_fb_idx
;
344 int dst_fb_idx
= cm
->new_fb_idx
;
345 int recon_y_stride
= cm
->yv12_fb
[ref_fb_idx
].y_stride
;
346 int recon_uv_stride
= cm
->yv12_fb
[ref_fb_idx
].uv_stride
;
347 int seg_map_index
= (mb_row
* cpi
->common
.mb_cols
);
350 // reset above block coeffs
351 xd
->above_context
[Y1CONTEXT
] = cm
->above_context
[Y1CONTEXT
];
352 xd
->above_context
[UCONTEXT
] = cm
->above_context
[UCONTEXT
];
353 xd
->above_context
[VCONTEXT
] = cm
->above_context
[VCONTEXT
];
354 xd
->above_context
[Y2CONTEXT
] = cm
->above_context
[Y2CONTEXT
];
356 xd
->up_available
= (mb_row
!= 0);
357 recon_yoffset
= (mb_row
* recon_y_stride
* 16);
358 recon_uvoffset
= (mb_row
* recon_uv_stride
* 8);
360 cpi
->tplist
[mb_row
].start
= *tp
;
361 //printf("Main mb_row = %d\n", mb_row);
363 // for each macroblock col in image
364 for (mb_col
= 0; mb_col
< cm
->mb_cols
; mb_col
++)
366 // Distance of Mb to the various image edges.
367 // These specified to 8th pel as they are always compared to values that are in 1/8th pel units
368 xd
->mb_to_left_edge
= -((mb_col
* 16) << 3);
369 xd
->mb_to_right_edge
= ((cm
->mb_cols
- 1 - mb_col
) * 16) << 3;
370 xd
->mb_to_top_edge
= -((mb_row
* 16) << 3);
371 xd
->mb_to_bottom_edge
= ((cm
->mb_rows
- 1 - mb_row
) * 16) << 3;
373 // Set up limit values for motion vectors used to prevent them extending outside the UMV borders
374 x
->mv_col_min
= -((mb_col
* 16) + (VP8BORDERINPIXELS
- 16));
375 x
->mv_col_max
= ((cm
->mb_cols
- 1 - mb_col
) * 16) + (VP8BORDERINPIXELS
- 16);
376 x
->mv_row_min
= -((mb_row
* 16) + (VP8BORDERINPIXELS
- 16));
377 x
->mv_row_max
= ((cm
->mb_rows
- 1 - mb_row
) * 16) + (VP8BORDERINPIXELS
- 16);
379 xd
->dst
.y_buffer
= cm
->yv12_fb
[dst_fb_idx
].y_buffer
+ recon_yoffset
;
380 xd
->dst
.u_buffer
= cm
->yv12_fb
[dst_fb_idx
].u_buffer
+ recon_uvoffset
;
381 xd
->dst
.v_buffer
= cm
->yv12_fb
[dst_fb_idx
].v_buffer
+ recon_uvoffset
;
382 xd
->left_available
= (mb_col
!= 0);
384 // Is segmentation enabled
385 // MB level adjutment to quantizer
386 if (xd
->segmentation_enabled
)
388 // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
389 if (cpi
->segmentation_map
[seg_map_index
+mb_col
] <= 3)
390 xd
->mode_info_context
->mbmi
.segment_id
= cpi
->segmentation_map
[seg_map_index
+mb_col
];
392 xd
->mode_info_context
->mbmi
.segment_id
= 0;
394 vp8cx_mb_init_quantizer(cpi
, x
);
397 xd
->mode_info_context
->mbmi
.segment_id
= 0; // Set to Segment 0 by default
399 x
->active_ptr
= cpi
->active_map
+ seg_map_index
+ mb_col
;
401 if (cm
->frame_type
== KEY_FRAME
)
403 *totalrate
+= vp8cx_encode_intra_macro_block(cpi
, x
, tp
);
405 y_modes
[xd
->mbmi
.mode
] ++;
410 *totalrate
+= vp8cx_encode_inter_macroblock(cpi
, x
, tp
, recon_yoffset
, recon_uvoffset
);
413 inter_y_modes
[xd
->mbmi
.mode
] ++;
415 if (xd
->mbmi
.mode
== SPLITMV
)
419 for (b
= 0; b
< xd
->mbmi
.partition_count
; b
++)
421 inter_b_modes
[xd
->mbmi
.partition_bmi
[b
].mode
] ++;
427 // Count of last ref frame 0,0 useage
428 if ((xd
->mode_info_context
->mbmi
.mode
== ZEROMV
) && (xd
->mode_info_context
->mbmi
.ref_frame
== LAST_FRAME
))
429 cpi
->inter_zz_count
++;
431 // Special case code for cyclic refresh
432 // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
433 // during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
434 if (cpi
->cyclic_refresh_mode_enabled
&& xd
->segmentation_enabled
)
436 cpi
->segmentation_map
[seg_map_index
+mb_col
] = xd
->mode_info_context
->mbmi
.segment_id
;
438 // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
439 // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
440 // else mark it as dirty (1).
441 if (xd
->mode_info_context
->mbmi
.segment_id
)
442 cpi
->cyclic_refresh_map
[seg_map_index
+mb_col
] = -1;
443 else if ((xd
->mode_info_context
->mbmi
.mode
== ZEROMV
) && (xd
->mode_info_context
->mbmi
.ref_frame
== LAST_FRAME
))
445 if (cpi
->cyclic_refresh_map
[seg_map_index
+mb_col
] == 1)
446 cpi
->cyclic_refresh_map
[seg_map_index
+mb_col
] = 0;
449 cpi
->cyclic_refresh_map
[seg_map_index
+mb_col
] = 1;
454 cpi
->tplist
[mb_row
].stop
= *tp
;
456 x
->gf_active_ptr
++; // Increment pointer into gf useage flags structure for next mb
458 for (i
= 0; i
< 16; i
++)
459 vpx_memcpy(&xd
->mode_info_context
->bmi
[i
], &xd
->block
[i
].bmi
, sizeof(xd
->block
[i
].bmi
));
461 // adjust to the next column of macroblocks
462 x
->src
.y_buffer
+= 16;
463 x
->src
.u_buffer
+= 8;
464 x
->src
.v_buffer
+= 8;
469 // Keep track of segment useage
470 segment_counts
[xd
->mode_info_context
->mbmi
.segment_id
] ++;
473 xd
->mode_info_context
++;
475 xd
->above_context
[Y1CONTEXT
] += 4;
476 xd
->above_context
[UCONTEXT
] += 2;
477 xd
->above_context
[VCONTEXT
] += 2;
478 xd
->above_context
[Y2CONTEXT
] ++;
479 cpi
->current_mb_col_main
= mb_col
;
482 //extend the recon for intra prediction
484 &cm
->yv12_fb
[dst_fb_idx
],
485 xd
->dst
.y_buffer
+ 16,
486 xd
->dst
.u_buffer
+ 8,
487 xd
->dst
.v_buffer
+ 8);
489 // this is to account for the border
490 xd
->mode_info_context
++;
497 void vp8_encode_frame(VP8_COMP
*cpi
)
500 MACROBLOCK
*const x
= & cpi
->mb
;
501 VP8_COMMON
*const cm
= & cpi
->common
;
502 MACROBLOCKD
*const xd
= & x
->e_mbd
;
505 TOKENEXTRA
*tp
= cpi
->tok
;
506 int segment_counts
[MAX_MB_SEGMENTS
];
509 if (cm
->frame_type
!= KEY_FRAME
)
511 if (cm
->mcomp_filter_type
== SIXTAP
)
513 xd
->subpixel_predict
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, sixtap4x4
);
514 xd
->subpixel_predict8x4
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, sixtap8x4
);
515 xd
->subpixel_predict8x8
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, sixtap8x8
);
516 xd
->subpixel_predict16x16
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, sixtap16x16
);
520 xd
->subpixel_predict
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, bilinear4x4
);
521 xd
->subpixel_predict8x4
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, bilinear8x4
);
522 xd
->subpixel_predict8x8
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, bilinear8x8
);
523 xd
->subpixel_predict16x16
= SUBPIX_INVOKE(&cpi
->common
.rtcd
.subpix
, bilinear16x16
);
529 // For key frames make sure the intra ref frame probability value
530 // is set to "all intra"
531 //cpi->prob_intra_coded = 255;
535 x
->gf_active_ptr
= (signed char *)cpi
->gf_active_flags
; // Point to base of GF active flags data structure
537 x
->vector_range
= 32;
539 // Count of MBs using the alternate Q if any
542 // Reset frame count of inter 0,0 motion vector useage.
543 cpi
->inter_zz_count
= 0;
545 vpx_memset(segment_counts
, 0, sizeof(segment_counts
));
547 cpi
->prediction_error
= 0;
548 cpi
->intra_error
= 0;
549 cpi
->skip_true_count
= 0;
550 cpi
->skip_false_count
= 0;
554 cpi
->frame_distortion
= 0;
555 cpi
->last_mb_distortion
= 0;
560 xd
->mode_info
= cm
->mi
- 1;
562 xd
->mode_info_context
= cm
->mi
;
563 xd
->mode_info_stride
= cm
->mode_info_stride
;
565 xd
->frame_type
= cm
->frame_type
;
567 xd
->frames_since_golden
= cm
->frames_since_golden
;
568 xd
->frames_till_alt_ref_frame
= cm
->frames_till_alt_ref_frame
;
569 vp8_zero(cpi
->MVcount
);
570 // vp8_zero( Contexts)
571 vp8_zero(cpi
->coef_counts
);
573 // reset intra mode contexts
574 if (cm
->frame_type
== KEY_FRAME
)
575 vp8_init_mbmode_probs(cm
);
578 vp8cx_frame_init_quantizer(cpi
);
580 if (cpi
->compressor_speed
== 2)
582 if (cpi
->oxcf
.cpu_used
< 0)
583 cpi
->Speed
= -(cpi
->oxcf
.cpu_used
);
585 vp8_auto_select_speed(cpi
);
588 vp8_initialize_rd_consts(cpi
, vp8_dc_quant(cm
->base_qindex
, cm
->y1dc_delta_q
));
589 //vp8_initialize_rd_consts( cpi, vp8_dc_quant(cpi->avg_frame_qindex, cm->y1dc_delta_q) );
590 vp8cx_initialize_me_consts(cpi
, cm
->base_qindex
);
591 //vp8cx_initialize_me_consts( cpi, cpi->avg_frame_qindex);
593 // Copy data over into macro block data sturctures.
595 x
->src
= * cpi
->Source
;
596 xd
->pre
= cm
->yv12_fb
[cm
->lst_fb_idx
];
597 xd
->dst
= cm
->yv12_fb
[cm
->new_fb_idx
];
599 // set up frame new frame for intra coded blocks
601 vp8_setup_intra_recon(&cm
->yv12_fb
[cm
->new_fb_idx
]);
603 vp8_build_block_offsets(x
);
605 vp8_setup_block_dptrs(&x
->e_mbd
);
607 vp8_setup_block_ptrs(x
);
609 x
->rddiv
= cpi
->RDDIV
;
610 x
->rdmult
= cpi
->RDMULT
;
613 // Experimental rd code
614 // 2 Pass - Possibly set Rdmult based on last frame distortion + this frame target bits or other metrics
615 // such as cpi->rate_correction_factor that indicate relative complexity.
616 /*if ( cpi->pass == 2 && (cpi->last_frame_distortion > 0) && (cpi->target_bits_per_mb > 0) )
618 //x->rdmult = ((cpi->last_frame_distortion * 256)/cpi->common.MBs)/ cpi->target_bits_per_mb;
619 x->rdmult = (int)(cpi->RDMULT * cpi->rate_correction_factor);
622 x->rdmult = cpi->RDMULT; */
623 //x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
626 xd
->mode_info_context
->mbmi
.mode
= DC_PRED
;
627 xd
->mode_info_context
->mbmi
.uv_mode
= DC_PRED
;
629 xd
->left_context
= cm
->left_context
;
631 vp8_zero(cpi
->count_mb_ref_frame_usage
)
632 vp8_zero(cpi
->ymode_count
)
633 vp8_zero(cpi
->uv_mode_count
)
637 // vp8_zero( entropy_stats)
639 ENTROPY_CONTEXT
**p
= cm
->above_context
;
640 const size_t L
= cm
->mb_cols
;
642 vp8_zero_array(p
[Y1CONTEXT
], L
* 4)
643 vp8_zero_array(p
[ UCONTEXT
], L
* 2)
644 vp8_zero_array(p
[ VCONTEXT
], L
* 2)
645 vp8_zero_array(p
[Y2CONTEXT
], L
)
650 struct vpx_usec_timer emr_timer
;
651 vpx_usec_timer_start(&emr_timer
);
653 if (!cpi
->b_multi_threaded
)
655 // for each macroblock row in image
656 for (mb_row
= 0; mb_row
< cm
->mb_rows
; mb_row
++)
659 vp8_zero(cm
->left_context
)
661 encode_mb_row(cpi
, cm
, mb_row
, x
, xd
, &tp
, segment_counts
, &totalrate
);
663 // adjust to the next row of mbs
664 x
->src
.y_buffer
+= 16 * x
->src
.y_stride
- 16 * cm
->mb_cols
;
665 x
->src
.u_buffer
+= 8 * x
->src
.uv_stride
- 8 * cm
->mb_cols
;
666 x
->src
.v_buffer
+= 8 * x
->src
.uv_stride
- 8 * cm
->mb_cols
;
669 cpi
->tok_count
= tp
- cpi
->tok
;
674 #if CONFIG_MULTITHREAD
675 vp8cx_init_mbrthread_data(cpi
, x
, cpi
->mb_row_ei
, 1, cpi
->encoding_thread_count
);
677 for (mb_row
= 0; mb_row
< cm
->mb_rows
; mb_row
+= (cpi
->encoding_thread_count
+ 1))
680 cpi
->current_mb_col_main
= -1;
682 for (i
= 0; i
< cpi
->encoding_thread_count
; i
++)
684 if ((mb_row
+ i
+ 1) >= cm
->mb_rows
)
687 cpi
->mb_row_ei
[i
].mb_row
= mb_row
+ i
+ 1;
688 cpi
->mb_row_ei
[i
].tp
= cpi
->tok
+ (mb_row
+ i
+ 1) * (cm
->mb_cols
* 16 * 24);
689 cpi
->mb_row_ei
[i
].current_mb_col
= -1;
690 //SetEvent(cpi->h_event_mbrencoding[i]);
691 sem_post(&cpi
->h_event_mbrencoding
[i
]);
694 vp8_zero(cm
->left_context
)
696 tp
= cpi
->tok
+ mb_row
* (cm
->mb_cols
* 16 * 24);
698 encode_mb_row(cpi
, cm
, mb_row
, x
, xd
, &tp
, segment_counts
, &totalrate
);
700 // adjust to the next row of mbs
701 x
->src
.y_buffer
+= 16 * x
->src
.y_stride
* (cpi
->encoding_thread_count
+ 1) - 16 * cm
->mb_cols
;
702 x
->src
.u_buffer
+= 8 * x
->src
.uv_stride
* (cpi
->encoding_thread_count
+ 1) - 8 * cm
->mb_cols
;
703 x
->src
.v_buffer
+= 8 * x
->src
.uv_stride
* (cpi
->encoding_thread_count
+ 1) - 8 * cm
->mb_cols
;
705 xd
->mode_info_context
+= xd
->mode_info_stride
* cpi
->encoding_thread_count
;
707 if (mb_row
< cm
->mb_rows
- 1)
708 //WaitForSingleObject(cpi->h_event_main, INFINITE);
709 sem_wait(&cpi
->h_event_main
);
713 for( ;mb_row<cm->mb_rows; mb_row ++)
715 vp8_zero( cm->left_context)
717 tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
719 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
720 // adjust to the next row of mbs
721 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
722 x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
723 x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
729 for (mb_row
= 0; mb_row
< cm
->mb_rows
; mb_row
++)
731 cpi
->tok_count
+= cpi
->tplist
[mb_row
].stop
- cpi
->tplist
[mb_row
].start
;
734 if (xd
->segmentation_enabled
)
739 if (xd
->segmentation_enabled
)
742 for (i
= 0; i
< cpi
->encoding_thread_count
; i
++)
744 for (j
= 0; j
< 4; j
++)
745 segment_counts
[j
] += cpi
->mb_row_ei
[i
].segment_counts
[j
];
751 for (i
= 0; i
< cpi
->encoding_thread_count
; i
++)
753 totalrate
+= cpi
->mb_row_ei
[i
].totalrate
;
760 vpx_usec_timer_mark(&emr_timer
);
761 cpi
->time_encode_mb_row
+= vpx_usec_timer_elapsed(&emr_timer
);
766 // Work out the segment probabilites if segmentation is enabled
767 if (xd
->segmentation_enabled
)
773 vpx_memset(xd
->mb_segment_tree_probs
, 255 , sizeof(xd
->mb_segment_tree_probs
));
775 tot_count
= segment_counts
[0] + segment_counts
[1] + segment_counts
[2] + segment_counts
[3];
779 xd
->mb_segment_tree_probs
[0] = ((segment_counts
[0] + segment_counts
[1]) * 255) / tot_count
;
781 tot_count
= segment_counts
[0] + segment_counts
[1];
785 xd
->mb_segment_tree_probs
[1] = (segment_counts
[0] * 255) / tot_count
;
788 tot_count
= segment_counts
[2] + segment_counts
[3];
791 xd
->mb_segment_tree_probs
[2] = (segment_counts
[2] * 255) / tot_count
;
793 // Zero probabilities not allowed
794 for (i
= 0; i
< MB_FEATURE_TREE_PROBS
; i
++)
796 if (xd
->mb_segment_tree_probs
[i
] == 0)
797 xd
->mb_segment_tree_probs
[i
] = 1;
802 // 256 rate units to the bit
803 cpi
->projected_frame_size
= totalrate
>> 8; // projected_frame_size in units of BYTES
805 // Make a note of the percentage MBs coded Intra.
806 if (cm
->frame_type
== KEY_FRAME
)
808 cpi
->this_frame_percent_intra
= 100;
814 tot_modes
= cpi
->count_mb_ref_frame_usage
[INTRA_FRAME
]
815 + cpi
->count_mb_ref_frame_usage
[LAST_FRAME
]
816 + cpi
->count_mb_ref_frame_usage
[GOLDEN_FRAME
]
817 + cpi
->count_mb_ref_frame_usage
[ALTREF_FRAME
];
820 cpi
->this_frame_percent_intra
= cpi
->count_mb_ref_frame_usage
[INTRA_FRAME
] * 100 / tot_modes
;
827 int flag
[2] = {0, 0};
829 for (cnt
= 0; cnt
< MVPcount
; cnt
++)
831 if (cm
->fc
.pre_mvc
[0][cnt
] != cm
->fc
.mvc
[0][cnt
])
834 vpx_memcpy(cm
->fc
.pre_mvc
[0], cm
->fc
.mvc
[0], MVPcount
);
839 for (cnt
= 0; cnt
< MVPcount
; cnt
++)
841 if (cm
->fc
.pre_mvc
[1][cnt
] != cm
->fc
.mvc
[1][cnt
])
844 vpx_memcpy(cm
->fc
.pre_mvc
[1], cm
->fc
.mvc
[1], MVPcount
);
849 if (flag
[0] || flag
[1])
850 vp8_build_component_cost_table(cpi
->mb
.mvcost
, cpi
->mb
.mvsadcost
, (const MV_CONTEXT
*) cm
->fc
.mvc
, flag
);
854 // Adjust the projected reference frame useage probability numbers to reflect
855 // what we have just seen. This may be usefull when we make multiple itterations
856 // of the recode loop rather than continuing to use values from the previous frame.
857 if ((cm
->frame_type
!= KEY_FRAME
) && !cm
->refresh_alt_ref_frame
&& !cm
->refresh_golden_frame
)
859 const int *const rfct
= cpi
->count_mb_ref_frame_usage
;
860 const int rf_intra
= rfct
[INTRA_FRAME
];
861 const int rf_inter
= rfct
[LAST_FRAME
] + rfct
[GOLDEN_FRAME
] + rfct
[ALTREF_FRAME
];
863 if ((rf_intra
+ rf_inter
) > 0)
865 cpi
->prob_intra_coded
= (rf_intra
* 255) / (rf_intra
+ rf_inter
);
867 if (cpi
->prob_intra_coded
< 1)
868 cpi
->prob_intra_coded
= 1;
870 if ((cm
->frames_since_golden
> 0) || cpi
->source_alt_ref_active
)
872 cpi
->prob_last_coded
= rf_inter
? (rfct
[LAST_FRAME
] * 255) / rf_inter
: 128;
874 if (cpi
->prob_last_coded
< 1)
875 cpi
->prob_last_coded
= 1;
877 cpi
->prob_gf_coded
= (rfct
[GOLDEN_FRAME
] + rfct
[ALTREF_FRAME
])
878 ? (rfct
[GOLDEN_FRAME
] * 255) / (rfct
[GOLDEN_FRAME
] + rfct
[ALTREF_FRAME
]) : 128;
880 if (cpi
->prob_gf_coded
< 1)
881 cpi
->prob_gf_coded
= 1;
887 // Keep record of the total distortion this time around for future use
888 cpi
->last_frame_distortion
= cpi
->frame_distortion
;
892 void vp8_setup_block_ptrs(MACROBLOCK
*x
)
897 for (r
= 0; r
< 4; r
++)
899 for (c
= 0; c
< 4; c
++)
901 x
->block
[r
*4+c
].src_diff
= x
->src_diff
+ r
* 4 * 16 + c
* 4;
905 for (r
= 0; r
< 2; r
++)
907 for (c
= 0; c
< 2; c
++)
909 x
->block
[16 + r
*2+c
].src_diff
= x
->src_diff
+ 256 + r
* 4 * 8 + c
* 4;
914 for (r
= 0; r
< 2; r
++)
916 for (c
= 0; c
< 2; c
++)
918 x
->block
[20 + r
*2+c
].src_diff
= x
->src_diff
+ 320 + r
* 4 * 8 + c
* 4;
922 x
->block
[24].src_diff
= x
->src_diff
+ 384;
925 for (i
= 0; i
< 25; i
++)
927 x
->block
[i
].coeff
= x
->coeff
+ i
* 16;
931 void vp8_build_block_offsets(MACROBLOCK
*x
)
936 vp8_build_block_doffsets(&x
->e_mbd
);
939 for (br
= 0; br
< 4; br
++)
941 for (bc
= 0; bc
< 4; bc
++)
943 BLOCK
*this_block
= &x
->block
[block
];
944 this_block
->base_src
= &x
->src
.y_buffer
;
945 this_block
->src_stride
= x
->src
.y_stride
;
946 this_block
->src
= 4 * br
* this_block
->src_stride
+ 4 * bc
;
952 for (br
= 0; br
< 2; br
++)
954 for (bc
= 0; bc
< 2; bc
++)
956 BLOCK
*this_block
= &x
->block
[block
];
957 this_block
->base_src
= &x
->src
.u_buffer
;
958 this_block
->src_stride
= x
->src
.uv_stride
;
959 this_block
->src
= 4 * br
* this_block
->src_stride
+ 4 * bc
;
965 for (br
= 0; br
< 2; br
++)
967 for (bc
= 0; bc
< 2; bc
++)
969 BLOCK
*this_block
= &x
->block
[block
];
970 this_block
->base_src
= &x
->src
.v_buffer
;
971 this_block
->src_stride
= x
->src
.uv_stride
;
972 this_block
->src
= 4 * br
* this_block
->src_stride
+ 4 * bc
;
978 static void sum_intra_stats(VP8_COMP
*cpi
, MACROBLOCK
*x
)
980 const MACROBLOCKD
*xd
= & x
->e_mbd
;
981 const MB_PREDICTION_MODE m
= xd
->mode_info_context
->mbmi
.mode
;
982 const MB_PREDICTION_MODE uvm
= xd
->mode_info_context
->mbmi
.uv_mode
;
985 const int is_key
= cpi
->common
.frame_type
== KEY_FRAME
;
987 ++ (is_key
? uv_modes
: inter_uv_modes
)[uvm
];
991 unsigned int *const bct
= is_key
? b_modes
: inter_b_modes
;
997 ++ bct
[xd
->block
[b
].bmi
.mode
];
1004 ++cpi
->ymode_count
[m
];
1005 ++cpi
->uv_mode_count
[uvm
];
1008 int vp8cx_encode_intra_macro_block(VP8_COMP
*cpi
, MACROBLOCK
*x
, TOKENEXTRA
**t
)
1010 int Error4x4
, Error16x16
, error_uv
;
1011 B_PREDICTION_MODE intra_bmodes
[16];
1012 int rate4x4
, rate16x16
, rateuv
;
1013 int dist4x4
, dist16x16
, distuv
;
1015 int rate4x4_tokenonly
= 0;
1016 int rate16x16_tokenonly
= 0;
1017 int rateuv_tokenonly
= 0;
1020 x
->e_mbd
.mode_info_context
->mbmi
.ref_frame
= INTRA_FRAME
;
1022 #if !(CONFIG_REALTIME_ONLY)
1024 if (cpi
->sf
.RD
|| cpi
->compressor_speed
!= 2)
1026 Error4x4
= vp8_rd_pick_intra4x4mby_modes(cpi
, x
, &rate4x4
, &rate4x4_tokenonly
, &dist4x4
);
1028 //save the b modes for possible later use
1029 for (i
= 0; i
< 16; i
++)
1030 intra_bmodes
[i
] = x
->e_mbd
.block
[i
].bmi
.mode
;
1032 Error16x16
= vp8_rd_pick_intra16x16mby_mode(cpi
, x
, &rate16x16
, &rate16x16_tokenonly
, &dist16x16
);
1034 error_uv
= vp8_rd_pick_intra_mbuv_mode(cpi
, x
, &rateuv
, &rateuv_tokenonly
, &distuv
);
1036 x
->e_mbd
.mode_info_context
->mbmi
.mb_skip_coeff
= (cpi
->common
.mb_no_coeff_skip
) ? 1 : 0;
1038 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi
->rtcd
), x
);
1041 if (Error4x4
< Error16x16
)
1044 x
->e_mbd
.mode_info_context
->mbmi
.mode
= B_PRED
;
1046 // get back the intra block modes
1047 for (i
= 0; i
< 16; i
++)
1048 x
->e_mbd
.block
[i
].bmi
.mode
= intra_bmodes
[i
];
1050 vp8_encode_intra4x4mby(IF_RTCD(&cpi
->rtcd
), x
);
1051 cpi
->prediction_error
+= Error4x4
;
1053 // Experimental RD code
1054 cpi
->frame_distortion
+= dist4x4
;
1059 vp8_encode_intra16x16mby(IF_RTCD(&cpi
->rtcd
), x
);
1063 // Experimental RD code
1064 cpi
->prediction_error
+= Error16x16
;
1065 cpi
->frame_distortion
+= dist16x16
;
1069 sum_intra_stats(cpi
, x
);
1071 vp8_tokenize_mb(cpi
, &x
->e_mbd
, t
);
1077 int rate2
, distortion2
;
1078 MB_PREDICTION_MODE mode
, best_mode
= DC_PRED
;
1080 Error16x16
= INT_MAX
;
1082 for (mode
= DC_PRED
; mode
<= TM_PRED
; mode
++)
1084 x
->e_mbd
.mode_info_context
->mbmi
.mode
= mode
;
1085 vp8_build_intra_predictors_mby_ptr(&x
->e_mbd
);
1086 distortion2
= VARIANCE_INVOKE(&cpi
->rtcd
.variance
, get16x16prederror
)(x
->src
.y_buffer
, x
->src
.y_stride
, x
->e_mbd
.predictor
, 16, 0x7fffffff);
1087 rate2
= x
->mbmode_cost
[x
->e_mbd
.frame_type
][mode
];
1088 this_rd
= RD_ESTIMATE(x
->rdmult
, x
->rddiv
, rate2
, distortion2
);
1090 if (Error16x16
> this_rd
)
1092 Error16x16
= this_rd
;
1097 vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi
->rtcd
), x
, &rate2
, &distortion2
);
1099 if (distortion2
== INT_MAX
)
1102 Error4x4
= RD_ESTIMATE(x
->rdmult
, x
->rddiv
, rate2
, distortion2
);
1104 x
->e_mbd
.mode_info_context
->mbmi
.mb_skip_coeff
= (cpi
->common
.mb_no_coeff_skip
) ? 1 : 0;
1106 if (Error4x4
< Error16x16
)
1108 x
->e_mbd
.mode_info_context
->mbmi
.mode
= B_PRED
;
1109 vp8_encode_intra4x4mby(IF_RTCD(&cpi
->rtcd
), x
);
1110 cpi
->prediction_error
+= Error4x4
;
1114 x
->e_mbd
.mode_info_context
->mbmi
.mode
= best_mode
;
1115 vp8_encode_intra16x16mby(IF_RTCD(&cpi
->rtcd
), x
);
1116 cpi
->prediction_error
+= Error16x16
;
1119 vp8_pick_intra_mbuv_mode(x
);
1120 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi
->rtcd
), x
);
1121 sum_intra_stats(cpi
, x
);
1122 vp8_tokenize_mb(cpi
, &x
->e_mbd
, t
);
/* Forward declaration: called below when a macroblock is coded as skipped
 * under mb_no_coeff_skip (after mb_skip_coeff is set to 1) so that the
 * entropy contexts remain consistent even though no tokens are emitted.
 * NOTE(review): exact semantics inferred from the call site in this file;
 * confirm against the defining translation unit. */
extern void vp8_fix_contexts(VP8_COMP *cpi, MACROBLOCKD *x);
1133 int vp8cx_encode_inter_macroblock
1135 VP8_COMP
*cpi
, MACROBLOCK
*x
, TOKENEXTRA
**t
,
1136 int recon_yoffset
, int recon_uvoffset
1139 MACROBLOCKD
*const xd
= &x
->e_mbd
;
1141 int intra_error
= 0;
1147 if (xd
->segmentation_enabled
)
1148 x
->encode_breakout
= cpi
->segment_encode_breakout
[xd
->mode_info_context
->mbmi
.segment_id
];
1150 x
->encode_breakout
= cpi
->oxcf
.encode_breakout
;
1152 #if !(CONFIG_REALTIME_ONLY)
1156 inter_error
= vp8_rd_pick_inter_mode(cpi
, x
, recon_yoffset
, recon_uvoffset
, &rate
, &distortion
, &intra_error
);
1160 inter_error
= vp8_pick_inter_mode(cpi
, x
, recon_yoffset
, recon_uvoffset
, &rate
, &distortion
, &intra_error
);
1163 cpi
->prediction_error
+= inter_error
;
1164 cpi
->intra_error
+= intra_error
;
1167 // Experimental RD code
1168 cpi
->frame_distortion
+= distortion
;
1169 cpi
->last_mb_distortion
= distortion
;
1172 // MB level adjutment to quantizer setup
1173 if (xd
->segmentation_enabled
|| cpi
->zbin_mode_boost_enabled
)
1175 // If cyclic update enabled
1176 if (cpi
->cyclic_refresh_mode_enabled
)
1178 // Clear segment_id back to 0 if not coded (last frame 0,0)
1179 if ((xd
->mode_info_context
->mbmi
.segment_id
== 1) &&
1180 ((xd
->mode_info_context
->mbmi
.ref_frame
!= LAST_FRAME
) || (xd
->mode_info_context
->mbmi
.mode
!= ZEROMV
)))
1182 xd
->mode_info_context
->mbmi
.segment_id
= 0;
1186 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
1187 if (cpi
->zbin_mode_boost_enabled
)
1189 if ((xd
->mode_info_context
->mbmi
.mode
== ZEROMV
) && (xd
->mode_info_context
->mbmi
.ref_frame
!= LAST_FRAME
))
1190 cpi
->zbin_mode_boost
= GF_ZEROMV_ZBIN_BOOST
;
1192 cpi
->zbin_mode_boost
= 0;
1195 vp8cx_mb_init_quantizer(cpi
, x
);
1198 cpi
->count_mb_ref_frame_usage
[xd
->mode_info_context
->mbmi
.ref_frame
] ++;
1200 if (xd
->mode_info_context
->mbmi
.ref_frame
== INTRA_FRAME
)
1202 x
->e_mbd
.mode_info_context
->mbmi
.mb_skip_coeff
= (cpi
->common
.mb_no_coeff_skip
) ? 1 : 0;
1204 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi
->rtcd
), x
);
1206 if (xd
->mode_info_context
->mbmi
.mode
== B_PRED
)
1208 vp8_encode_intra4x4mby(IF_RTCD(&cpi
->rtcd
), x
);
1212 vp8_encode_intra16x16mby(IF_RTCD(&cpi
->rtcd
), x
);
1215 sum_intra_stats(cpi
, x
);
1224 vp8_find_near_mvs(xd
, xd
->mode_info_context
,
1225 &nearest
, &nearby
, &best_ref_mv
, mdcounts
, xd
->mode_info_context
->mbmi
.ref_frame
, cpi
->common
.ref_frame_sign_bias
);
1227 vp8_build_uvmvs(xd
, cpi
->common
.full_pixel
);
1229 if (xd
->mode_info_context
->mbmi
.ref_frame
== LAST_FRAME
)
1230 ref_fb_idx
= cpi
->common
.lst_fb_idx
;
1231 else if (xd
->mode_info_context
->mbmi
.ref_frame
== GOLDEN_FRAME
)
1232 ref_fb_idx
= cpi
->common
.gld_fb_idx
;
1234 ref_fb_idx
= cpi
->common
.alt_fb_idx
;
1236 xd
->pre
.y_buffer
= cpi
->common
.yv12_fb
[ref_fb_idx
].y_buffer
+ recon_yoffset
;
1237 xd
->pre
.u_buffer
= cpi
->common
.yv12_fb
[ref_fb_idx
].u_buffer
+ recon_uvoffset
;
1238 xd
->pre
.v_buffer
= cpi
->common
.yv12_fb
[ref_fb_idx
].v_buffer
+ recon_uvoffset
;
1240 if (xd
->mode_info_context
->mbmi
.mode
== SPLITMV
)
1244 for (i
= 0; i
< 16; i
++)
1246 if (xd
->block
[i
].bmi
.mode
== NEW4X4
)
1248 cpi
->MVcount
[0][mv_max
+((xd
->block
[i
].bmi
.mv
.as_mv
.row
- best_ref_mv
.row
) >> 1)]++;
1249 cpi
->MVcount
[1][mv_max
+((xd
->block
[i
].bmi
.mv
.as_mv
.col
- best_ref_mv
.col
) >> 1)]++;
1253 else if (xd
->mode_info_context
->mbmi
.mode
== NEWMV
)
1255 cpi
->MVcount
[0][mv_max
+((xd
->block
[0].bmi
.mv
.as_mv
.row
- best_ref_mv
.row
) >> 1)]++;
1256 cpi
->MVcount
[1][mv_max
+((xd
->block
[0].bmi
.mv
.as_mv
.col
- best_ref_mv
.col
) >> 1)]++;
1259 if (!x
->skip
&& !x
->e_mbd
.mode_info_context
->mbmi
.force_no_skip
)
1261 vp8_encode_inter16x16(IF_RTCD(&cpi
->rtcd
), x
);
1263 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
1264 if (!cpi
->common
.mb_no_coeff_skip
)
1265 xd
->mode_info_context
->mbmi
.mb_skip_coeff
= 0;
1269 vp8_stuff_inter16x16(x
);
1273 vp8_tokenize_mb(cpi
, xd
, t
);
1276 if (cpi
->common
.mb_no_coeff_skip
)
1278 if (xd
->mode_info_context
->mbmi
.mode
!= B_PRED
&& xd
->mode_info_context
->mbmi
.mode
!= SPLITMV
)
1279 xd
->mode_info_context
->mbmi
.dc_diff
= 0;
1281 xd
->mode_info_context
->mbmi
.dc_diff
= 1;
1283 xd
->mode_info_context
->mbmi
.mb_skip_coeff
= 1;
1284 cpi
->skip_true_count
++;
1285 vp8_fix_contexts(cpi
, xd
);
1289 vp8_stuff_mb(cpi
, xd
, t
);
1290 xd
->mode_info_context
->mbmi
.mb_skip_coeff
= 0;
1291 cpi
->skip_false_count
++;