Removed unused vp8_recon_intra4x4mb function
[libvpx.git] / vp8 / encoder / rdopt.c
blob3449e4532652d0a69fb3e4954a940cf32a6b96bf
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
12 #include <stdio.h>
13 #include <math.h>
14 #include <limits.h>
15 #include <assert.h>
16 #include "pragmas.h"
18 #include "tokenize.h"
19 #include "treewriter.h"
20 #include "onyx_int.h"
21 #include "modecosts.h"
22 #include "encodeintra.h"
23 #include "entropymode.h"
24 #include "reconinter.h"
25 #include "reconintra.h"
26 #include "reconintra4x4.h"
27 #include "findnearmv.h"
28 #include "encodemb.h"
29 #include "quantize.h"
30 #include "idct.h"
31 #include "g_common.h"
32 #include "variance.h"
33 #include "mcomp.h"
35 #include "vpx_mem/vpx_mem.h"
36 #include "dct.h"
37 #include "systemdependent.h"
39 #if CONFIG_RUNTIME_CPU_DETECT
40 #define IF_RTCD(x) (x)
41 #else
42 #define IF_RTCD(x) NULL
43 #endif
46 extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
47 extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
50 #define RDCOST(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )
52 #define MAXF(a,b) (((a) > (b)) ? (a) : (b))
// Per-Speed thresholds (indexed by cpi->Speed, 0..16) used by
// vp8_auto_select_speed() to decide when encoding is slow enough,
// relative to the frame time budget, that Speed should be reduced.
const int vp8_auto_speed_thresh[17] =
{
    1000,
    200,
    150,
    130,
    150,
    125,
    120,
    115,
    115,
    115,
    115,
    115,
    115,
    115,
    115,
    115,
    105
};
// Order in which candidate prediction modes are tried by the RD mode
// search.  Each entry pairs with the reference frame at the same index
// in vp8_ref_frame_order[] below; intra modes pair with INTRA_FRAME.
const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] =
{
    ZEROMV,
    DC_PRED,

    NEARESTMV,
    NEARMV,

    ZEROMV,
    NEARESTMV,

    ZEROMV,
    NEARESTMV,

    NEARMV,
    NEARMV,

    V_PRED,
    H_PRED,
    TM_PRED,

    NEWMV,
    NEWMV,
    NEWMV,

    SPLITMV,
    SPLITMV,
    SPLITMV,

    B_PRED,
};
// Reference frame used for each entry of vp8_mode_order[] above; the two
// tables are walked in lock-step by the RD mode search.
const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES] =
{
    LAST_FRAME,
    INTRA_FRAME,

    LAST_FRAME,
    LAST_FRAME,

    GOLDEN_FRAME,
    GOLDEN_FRAME,

    ALTREF_FRAME,
    ALTREF_FRAME,

    GOLDEN_FRAME,
    ALTREF_FRAME,

    INTRA_FRAME,
    INTRA_FRAME,
    INTRA_FRAME,

    LAST_FRAME,
    GOLDEN_FRAME,
    ALTREF_FRAME,

    LAST_FRAME,
    GOLDEN_FRAME,
    ALTREF_FRAME,

    INTRA_FRAME,
};
141 static void fill_token_costs(
142 unsigned int c [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef_tokens],
143 const vp8_prob p [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef_tokens-1]
146 int i, j, k;
149 for (i = 0; i < BLOCK_TYPES; i++)
150 for (j = 0; j < COEF_BANDS; j++)
151 for (k = 0; k < PREV_COEF_CONTEXTS; k++)
153 vp8_cost_tokens((int *)(c [i][j][k]), p [i][j][k], vp8_coef_tree);
// Second-pass RDMULT boost factors indexed by next_iiratio (clamped to
// 31).  Only low intra/inter ratios get a non-zero boost; see
// vp8_initialize_rd_consts().
static int rd_iifactor [ 32 ]  = { 4, 4, 3, 2, 1, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                   0, 0, 0, 0, 0, 0, 0, 0,
                                 };
/* values are now correlated to quantizer */
// SAD-per-bit cost multiplier for 16x16 motion search, indexed by Q index.
static int sad_per_bit16lut[QINDEX_RANGE] =
{
    5,  5,  5,  5,  5,  5,  6,  6,
    6,  6,  6,  6,  6,  7,  7,  7,
    7,  7,  7,  7,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  9,  9,
    9,  9,  9,  9, 10, 10, 10, 10,
   10, 10, 11, 11, 11, 11, 11, 11,
   12, 12, 12, 12, 12, 12, 12, 13,
   13, 13, 13, 13, 13, 14, 14, 14,
   14, 14, 15, 15, 15, 15, 15, 15,
   16, 16, 16, 16, 16, 16, 17, 17,
   17, 17, 17, 17, 17, 18, 18, 18,
   18, 18, 19, 19, 19, 19, 19, 19,
   20, 20, 20, 21, 21, 21, 21, 22,
   22, 22, 23, 23, 23, 24, 24, 24,
   25, 25, 26, 26, 27, 27, 27, 28,
   28, 28, 29, 29, 30, 30, 31, 31
};
// SAD-per-bit cost multiplier for 4x4 (split) motion search, indexed by
// Q index; scaled up relative to the 16x16 table.
static int sad_per_bit4lut[QINDEX_RANGE] =
{
    5,  5,  5,  5,  5,  5,  7,  7,
    7,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8, 10, 10, 10, 10,
   10, 10, 10, 10, 10, 10, 11, 11,
   11, 11, 11, 11, 13, 13, 13, 13,
   13, 13, 14, 14, 14, 14, 14, 14,
   16, 16, 16, 16, 16, 16, 16, 17,
   17, 17, 17, 17, 17, 19, 19, 19,
   19, 19, 20, 20, 20, 20, 20, 20,
   22, 22, 22, 22, 22, 22, 23, 23,
   23, 23, 23, 23, 23, 25, 25, 25,
   25, 25, 26, 26, 26, 26, 26, 26,
   28, 28, 28, 29, 29, 29, 29, 31,
   31, 31, 32, 32, 32, 34, 34, 34,
   35, 35, 37, 37, 38, 38, 38, 40,
   40, 40, 41, 41, 43, 43, 44, 44,
};
// Load the per-Q SAD cost multipliers used by the motion search.
// QIndex is assumed to lie in [0, QINDEX_RANGE) -- no bounds check here.
void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex)
{
    cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex]; // 16x16 searches
    cpi->mb.sadperbit4  = sad_per_bit4lut[QIndex];  // 4x4 split searches
}
// Set up the rate-distortion constants (RDMULT/RDDIV, errorperbit and the
// per-mode RD thresholds) for the current frame quantizer Qvalue.
void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
{
    int q;
    int i;
    // Q is capped at 160 so RDMULT does not grow quadratically without bound.
    double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
    double rdconst = 3.00;

    vp8_clear_system_state();  //__asm emms;

    // Further tests required to see if optimum is different
    // for key frames, golden frames and arf frames.
    // if (cpi->common.refresh_golden_frame ||
    //     cpi->common.refresh_alt_ref_frame)
    cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));

    // Extend rate multiplier along side quantizer zbin increases
    if (cpi->zbin_over_quant > 0)
    {
        double oq_factor;
        double modq;

        // Experimental code using the same basic equation as used for Q above
        // The units of cpi->zbin_over_quant are 1/128 of Q bin size
        oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
        modq = (int)((double)capped_q * oq_factor);
        cpi->RDMULT = (int)(rdconst * (modq * modq));
    }

    // In the second pass, boost RDMULT for inter frames according to the
    // intra/inter ratio forecast for the next frame (rd_iifactor table).
    if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
    {
        if (cpi->next_iiratio > 31)
            cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
        else
            cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) >> 4;
    }

    cpi->mb.errorperbit = (cpi->RDMULT / 100);
    cpi->mb.errorperbit += (cpi->mb.errorperbit == 0); // keep it non-zero

    vp8_set_speed_features(cpi);

    if (cpi->common.simpler_lpf)
        cpi->common.filter_type = SIMPLE_LOOPFILTER;

    // Mode-threshold scale factor grows super-linearly with Q.
    q = (int)pow(Qvalue, 1.25);

    if (q < 8)
        q = 8;

    // Two scalings keep rd_threshes within integer range: for large RDMULT
    // divide the multiplier by 100 (RDDIV = 1), otherwise fold the factor
    // of 100 into RDDIV instead.
    if (cpi->RDMULT > 1000)
    {
        cpi->RDDIV = 1;
        cpi->RDMULT /= 100;

        for (i = 0; i < MAX_MODES; i++)
        {
            if (cpi->sf.thresh_mult[i] < INT_MAX)
            {
                cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
            }
            else
            {
                cpi->rd_threshes[i] = INT_MAX;
            }

            cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
        }
    }
    else
    {
        cpi->RDDIV = 100;

        for (i = 0; i < MAX_MODES; i++)
        {
            // Guard against overflow of thresh_mult[i] * q.
            if (cpi->sf.thresh_mult[i] < (INT_MAX / q))
            {
                cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
            }
            else
            {
                cpi->rd_threshes[i] = INT_MAX;
            }

            cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
        }
    }

    fill_token_costs(
        cpi->mb.token_costs,
        (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs
    );

    vp8_init_mode_costs(cpi);
}
// Adapt cpi->Speed based on how the measured encode time compares with
// the per-frame time budget derived from the configured frame rate.
void vp8_auto_select_speed(VP8_COMP *cpi)
{
    int used = cpi->oxcf.cpu_used;

    // Per-frame budget in microseconds, scaled down by cpu_used.
    int milliseconds_for_compress = (int)(1000000 / cpi->oxcf.frame_rate);

    milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;

#if 0

    if (0)
    {
        FILE *f;

        f = fopen("speed.stt", "a");
        fprintf(f, " %8ld %10ld %10ld %10ld\n",
                cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
        fclose(f);
    }

#endif

    // this is done during parameter valid check
    // NOTE(review): 'used' is clamped here but never read afterwards --
    // the budget above uses the unclamped cpi->oxcf.cpu_used; confirm
    // whether that is intentional.
    if (used > 16)
        used = 16;

    if (used < -16)
        used = -16;

    if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress)
    {
        if (cpi->avg_pick_mode_time == 0)
        {
            cpi->Speed = 4;
        }
        else
        {
            // Over budget by more than ~5%: speed up by 2 (capped at 16).
            if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95)
            {
                cpi->Speed += 2;
                cpi->avg_pick_mode_time = 0;
                cpi->avg_encode_time = 0;

                if (cpi->Speed > 16)
                {
                    cpi->Speed = 16;
                }
            }

            // Comfortably under the per-Speed threshold: slow down by 1.
            if (milliseconds_for_compress * 100 > cpi->avg_encode_time * vp8_auto_speed_thresh[cpi->Speed])
            {
                cpi->Speed -= 1;
                cpi->avg_pick_mode_time = 0;
                cpi->avg_encode_time = 0;

                // In real-time mode, cpi->speed is in [4, 16].
                if (cpi->Speed < 4)        //if ( cpi->Speed < 0 )
                {
                    cpi->Speed = 4;        //cpi->Speed = 0;
                }
            }
        }
    }
    else
    {
        // Way over budget: jump up aggressively and reset the averages.
        cpi->Speed += 4;

        if (cpi->Speed > 16)
            cpi->Speed = 16;

        cpi->avg_pick_mode_time = 0;
        cpi->avg_encode_time = 0;
    }
}
/* Sum of squared differences between the original and dequantized
   coefficients of one 4x4 block (16 coefficients). */
int vp8_block_error_c(short *coeff, short *dqcoeff)
{
    int err = 0;
    int k;

    for (k = 0; k < 16; k++)
    {
        const int diff = coeff[k] - dqcoeff[k];

        err += diff * diff;
    }

    return err;
}
397 int vp8_mbblock_error_c(MACROBLOCK *mb, int dc)
399 BLOCK *be;
400 BLOCKD *bd;
401 int i, j;
402 int berror, error = 0;
404 for (i = 0; i < 16; i++)
406 be = &mb->block[i];
407 bd = &mb->e_mbd.block[i];
409 berror = 0;
411 for (j = dc; j < 16; j++)
413 int this_diff = be->coeff[j] - bd->dqcoeff[j];
414 berror += this_diff * this_diff;
417 error += berror;
420 return error;
423 int vp8_mbuverror_c(MACROBLOCK *mb)
426 BLOCK *be;
427 BLOCKD *bd;
430 int i;
431 int error = 0;
433 for (i = 16; i < 24; i++)
435 be = &mb->block[i];
436 bd = &mb->e_mbd.block[i];
438 error += vp8_block_error_c(be->coeff, bd->dqcoeff);
441 return error;
444 int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
446 unsigned char *uptr, *vptr;
447 unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
448 unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
449 int uv_stride = x->block[16].src_stride;
451 unsigned int sse1 = 0;
452 unsigned int sse2 = 0;
453 int mv_row;
454 int mv_col;
455 int offset;
456 int pre_stride = x->e_mbd.block[16].pre_stride;
458 vp8_build_uvmvs(&x->e_mbd, 0);
459 mv_row = x->e_mbd.block[16].bmi.mv.as_mv.row;
460 mv_col = x->e_mbd.block[16].bmi.mv.as_mv.col;
462 offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
463 uptr = x->e_mbd.pre.u_buffer + offset;
464 vptr = x->e_mbd.pre.v_buffer + offset;
466 if ((mv_row | mv_col) & 7)
468 VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
469 VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
470 sse2 += sse1;
472 else
474 VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
475 VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
476 sse2 += sse1;
479 return sse2;
483 #if !(CONFIG_REALTIME_ONLY)
// Bit cost of the quantized coefficients of one block, walking the
// zig-zag order up to the end-of-block.  'a'/'l' are the above/left
// entropy contexts and are updated to reflect this block.
static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
{
    int c = !type;              /* start at coef 0, unless Y with Y2 */
    int eob = b->eob;
    int pt;                     /* surrounding block/prev coef predictor */
    int cost = 0;
    short *qcoeff_ptr = b->qcoeff;

    VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);

# define QC( I)  ( qcoeff_ptr [vp8_default_zig_zag1d[I]] )

    for (; c < eob; c++)
    {
        int v = QC(c);
        int t = vp8_dct_value_tokens_ptr[v].Token;

        // Token cost in the current context plus the extra-bits cost of
        // the coefficient's magnitude/sign.
        cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [t];
        cost += vp8_dct_value_cost_ptr[v];
        pt = vp8_prev_token_class[t];
    }

# undef QC

    // If the block ended early, an explicit EOB token must be paid for.
    if (c < 16)
        cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];

    pt = (c != !type); // is eob first coefficient;
    *a = *l = pt;

    return cost;
}
// Token rate of the whole luma plane: 16 Y blocks (type 0, no DC) plus
// the second-order Y2 block (block 24, type 1).  Works on copies of the
// entropy contexts so the real ones are left untouched.
static int vp8_rdcost_mby(MACROBLOCK *mb)
{
    int cost = 0;
    int b;
    MACROBLOCKD *x = &mb->e_mbd;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;

    vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    for (b = 0; b < 16; b++)
        cost += cost_coeffs(mb, x->block + b, 0,
                            ta + vp8_block2above[b], tl + vp8_block2left[b]);

    cost += cost_coeffs(mb, x->block + 24, 1,
                        ta + vp8_block2above[24], tl + vp8_block2left[24]);

    return cost;
}
// Trial-encode the luma plane (subtract prediction, forward transforms,
// quantize) and report the resulting rate and distortion for RD costing.
static void macro_block_yrd( MACROBLOCK *mb,
                             int *Rate,
                             int *Distortion,
                             const vp8_encodemb_rtcd_vtable_t *rtcd)
{
    int b;
    MACROBLOCKD *const x = &mb->e_mbd;
    BLOCK  *const mb_y2 = mb->block + 24;   // second order (Y2) block
    BLOCKD *const x_y2  = x->block + 24;
    short *Y2DCPtr = mb_y2->src_diff;
    BLOCK *beptr;
    int d;

    ENCODEMB_INVOKE(rtcd, submby)( mb->src_diff, mb->src.y_buffer,
                                   mb->e_mbd.predictor, mb->src.y_stride );

    // Fdct and building the 2nd order block
    // (each 8x4 fdct covers two 4x4 blocks; their DC terms are collected
    // into the Y2 source-difference buffer)
    for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
    {
        mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
        *Y2DCPtr++ = beptr->coeff[0];
        *Y2DCPtr++ = beptr->coeff[16];
    }

    // 2nd order fdct
    mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);

    // Quantization
    for (b = 0; b < 16; b++)
    {
        mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
    }

    // DC predication and Quantization of 2nd Order block
    mb->quantize_b(mb_y2, x_y2);

    // Distortion: first-order error scaled up (<< 2) plus Y2 error,
    // normalised by >> 4 at the end.
    d = ENCODEMB_INVOKE(rtcd, mberr)(mb, 1) << 2;
    d += ENCODEMB_INVOKE(rtcd, berr)(mb_y2->coeff, x_y2->dqcoeff);

    *Distortion = (d >> 4);

    // rate
    *Rate = vp8_rdcost_mby(mb);
}
/* Copy a 4x4 pixel block out of the 16-byte-stride 'predictor' buffer
   into 16 contiguous bytes at 'dst'. */
static void save_predictor(unsigned char *predictor, unsigned char *dst)
{
    int row;

    for (row = 0; row < 4; row++)
    {
        int col;

        for (col = 0; col < 4; col++)
            dst[col] = predictor[col];

        dst += 4;        /* destination is packed */
        predictor += 16; /* source has a 16 byte stride */
    }
}
/* Inverse of save_predictor(): scatter 16 contiguous bytes from 'dst'
   (the source here, despite its name) back into the 4x4 region of the
   16-byte-stride 'predictor' buffer. */
static void restore_predictor(unsigned char *predictor, unsigned char *dst)
{
    int row;

    for (row = 0; row < 4; row++)
    {
        int col;

        for (col = 0; col < 4; col++)
            predictor[col] = dst[col];

        dst += 4;        /* packed source */
        predictor += 16; /* strided destination */
    }
}
// Try every 4x4 intra prediction mode on one block, trial-encoding each,
// and keep the one with the lowest RD cost.  On return the block is left
// encoded and reconstructed with the winning mode, and the entropy
// contexts *a/*l reflect it.  Returns the best RD cost.
static int rd_pick_intra4x4block(
    VP8_COMP *cpi,
    MACROBLOCK *x,
    BLOCK *be,
    BLOCKD *b,
    B_PREDICTION_MODE *best_mode,
    unsigned int *bmode_costs,
    ENTROPY_CONTEXT *a,
    ENTROPY_CONTEXT *l,

    int *bestrate,
    int *bestratey,
    int *bestdistortion)
{
    B_PREDICTION_MODE mode;
    int best_rd = INT_MAX;
    int rate = 0;
    int distortion;

    // Working copies of the entropy contexts: 'ta'/'tl' hold the state on
    // entry, 'tempa'/'templ' are reset from them for each trial mode.
    ENTROPY_CONTEXT ta = *a, tempa = *a;
    ENTROPY_CONTEXT tl = *l, templ = *l;

    // Saved copies of the winning mode's predictor and dqcoeff, restored
    // after the mode loop so the final reconstruction uses the winner.
    DECLARE_ALIGNED_ARRAY(16, unsigned char, predictor, 16);
    DECLARE_ALIGNED_ARRAY(16, short, dqcoeff, 16);

    for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++)
    {
        int this_rd;
        int ratey;

        rate = bmode_costs[mode];

        // Trial encode: predict, subtract, transform, quantize.
        vp8_predict_intra4x4(b, mode, b->predictor);
        ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
        x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
        x->quantize_b(be, b);

        tempa = ta;
        templ = tl;

        ratey = cost_coeffs(x, b, 3, &tempa, &templ);
        rate += ratey;
        distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(be->coeff, b->dqcoeff) >> 2;

        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            *bestrate = rate;
            *bestratey = ratey;
            *bestdistortion = distortion;
            best_rd = this_rd;
            *best_mode = mode;
            *a = tempa;
            *l = templ;
            save_predictor(b->predictor, predictor);
            vpx_memcpy(dqcoeff, b->dqcoeff, 32);
        }
    }

    b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);

    // Restore the winner's state, then reconstruct the block from it.
    restore_predictor(b->predictor, predictor);
    vpx_memcpy(b->dqcoeff, dqcoeff, 32);

    IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(b->dqcoeff, b->diff, 32);
    RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);

    return best_rd;
}
// RD search for B_PRED: pick the best 4x4 intra mode for each of the 16
// luma blocks.  Aborts with INT_MAX as soon as the accumulated RD cost
// reaches 'best_rd' (the best full-MB cost found so far by the caller).
int vp8_rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
                                  int *rate_y, int *Distortion, int best_rd)
{
    MACROBLOCKD *const xd = &mb->e_mbd;
    int i;
    int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
    int distortion = 0;
    int tot_rate_y = 0;
    int total_rd = 0;
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;
    unsigned int *bmode_costs;

    vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;

    vp8_intra_prediction_down_copy(xd);

    bmode_costs = mb->inter_bmode_costs;

    for (i = 0; i < 16; i++)
    {
        MODE_INFO *const mic = xd->mode_info_context;
        const int mis = xd->mode_info_stride;
        B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
        int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);

        // On key frames the 4x4 mode is coded conditioned on the above
        // and left block modes, so use the contextual cost table.
        if (mb->e_mbd.frame_type == KEY_FRAME)
        {
            const B_PREDICTION_MODE A = vp8_above_bmi(mic, i, mis)->mode;
            const B_PREDICTION_MODE L = vp8_left_bmi(mic, i)->mode;

            bmode_costs = mb->bmode_costs[A][L];
        }

        total_rd += rd_pick_intra4x4block(
                        cpi, mb, mb->block + i, xd->block + i, &best_mode, bmode_costs,
                        ta + vp8_block2above[i],
                        tl + vp8_block2left[i], &r, &ry, &d);

        cost += r;
        distortion += d;
        tot_rate_y += ry;
        mic->bmi[i].mode = xd->block[i].bmi.mode = best_mode;

        // Early out once this can no longer beat the caller's best.
        if(total_rd >= best_rd)
            break;
    }

    if(total_rd >= best_rd)
        return INT_MAX;

    *Rate = cost;
    *rate_y += tot_rate_y;
    *Distortion = distortion;

    return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
// RD search over the four 16x16 luma intra modes (DC/V/H/TM).  Leaves
// the winning mode in mode_info_context and returns its RD cost.
int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
                                   MACROBLOCK *x,
                                   int *Rate,
                                   int *rate_y,
                                   int *Distortion)
{
    MB_PREDICTION_MODE mode;
    MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
    int rate, ratey;
    int distortion;
    int best_rd = INT_MAX;
    int this_rd;

    //Y Search for 16x16 intra prediction mode
    for (mode = DC_PRED; mode <= TM_PRED; mode++)
    {
        x->e_mbd.mode_info_context->mbmi.mode = mode;

        vp8_build_intra_predictors_mby_ptr(&x->e_mbd);

        // Trial encode the luma plane and add the mode signalling cost.
        macro_block_yrd(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd.encodemb));
        rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
               [x->e_mbd.mode_info_context->mbmi.mode];

        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            mode_selected = mode;
            best_rd = this_rd;
            *Rate = rate;
            *rate_y = ratey;
            *Distortion = distortion;
        }
    }

    x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
    return best_rd;
}
789 static int rd_cost_mbuv(MACROBLOCK *mb)
791 int b;
792 int cost = 0;
793 MACROBLOCKD *x = &mb->e_mbd;
794 ENTROPY_CONTEXT_PLANES t_above, t_left;
795 ENTROPY_CONTEXT *ta;
796 ENTROPY_CONTEXT *tl;
798 vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
799 vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
801 ta = (ENTROPY_CONTEXT *)&t_above;
802 tl = (ENTROPY_CONTEXT *)&t_left;
804 for (b = 16; b < 20; b++)
805 cost += cost_coeffs(mb, x->block + b, vp8_block2type[b],
806 ta + vp8_block2above[b], tl + vp8_block2left[b]);
808 for (b = 20; b < 24; b++)
809 cost += cost_coeffs(mb, x->block + b, vp8_block2type[b],
810 ta + vp8_block2above[b], tl + vp8_block2left[b]);
812 return cost;
// Trial-encode the chroma planes for the current inter mode and return
// the RD cost; *rate and *distortion receive the components.
static int vp8_rd_inter_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *distortion, int fullpixel)
{
    vp8_build_uvmvs(&x->e_mbd, fullpixel);
    vp8_encode_inter16x16uvrd(IF_RTCD(&cpi->rtcd), x);

    *rate = rd_cost_mbuv(x);
    *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;

    return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
// RD search over the four chroma intra modes (DC/V/H/TM).  Leaves the
// winning uv_mode in mode_info_context and returns its RD cost.
int vp8_rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly, int *distortion)
{
    MB_PREDICTION_MODE mode;
    MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
    int best_rd = INT_MAX;
    int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
    int rate_to;

    for (mode = DC_PRED; mode <= TM_PRED; mode++)
    {
        int rate;
        int distortion;
        int this_rd;

        // Trial encode the chroma planes with this prediction mode.
        x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
        vp8_build_intra_predictors_mbuv(&x->e_mbd);
        ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
                x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
                x->src.uv_stride);
        vp8_transform_mbuv(x);
        vp8_quantize_mbuv(x);

        rate_to = rd_cost_mbuv(x);
        rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.uv_mode];

        distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;

        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            best_rd = this_rd;
            d = distortion;
            r = rate;
            *rate_tokenonly = rate_to;
            mode_selected = mode;
        }
    }

    *rate = r;
    *distortion = d;

    x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
    return best_rd;
}
873 #endif
// Bit cost of signalling inter mode 'm' (NEARESTMV..SPLITMV) given the
// near-MV reference counts of the surrounding context.
int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4])
{
    vp8_prob p [VP8_MVREFS-1];
    assert(NEARESTMV <= m  &&  m <= SPLITMV);
    vp8_mv_ref_probs(p, near_mv_ref_ct);
    return vp8_cost_token(vp8_mv_ref_tree, p,
                          vp8_mv_ref_encoding_array - NEARESTMV + m);
}
884 void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv)
886 int i;
888 x->e_mbd.mode_info_context->mbmi.mode = mb;
889 x->e_mbd.mode_info_context->mbmi.mv.as_mv.row = mv->row;
890 x->e_mbd.mode_info_context->mbmi.mv.as_mv.col = mv->col;
892 for (i = 0; i < 16; i++)
894 B_MODE_INFO *bmi = &x->e_mbd.block[i].bmi;
895 bmi->mode = (B_PREDICTION_MODE) mb;
896 bmi->mv.as_mv.row = mv->row;
897 bmi->mv.as_mv.col = mv->col;
901 #if !(CONFIG_REALTIME_ONLY)
// Assign sub-block mode 'this_mode' (and, for NEW4X4, *this_mv) to every
// 4x4 block carrying 'which_label', converting repeated labels into
// LEFT4X4/ABOVE4X4 references.  Returns the mode cost plus (for NEW4X4)
// the motion vector cost.
static int labels2mode(
    MACROBLOCK *x,
    int const *labelings, int which_label,
    B_PREDICTION_MODE this_mode,
    MV *this_mv, MV *best_ref_mv,
    int *mvcost[2]
)
{
    MACROBLOCKD *const xd = & x->e_mbd;
    MODE_INFO *const mic = xd->mode_info_context;
    const int mis = xd->mode_info_stride;

    int cost = 0;
    int thismvcost = 0;

    /* We have to be careful retrieving previously-encoded motion vectors.
       Ones from this macroblock have to be pulled from the BLOCKD array
       as they have not yet made it to the bmi array in our MB_MODE_INFO. */

    int i = 0;

    do
    {
        BLOCKD *const d = xd->block + i;
        const int row = i >> 2,  col = i & 3;

        B_PREDICTION_MODE m;

        if (labelings[i] != which_label)
            continue;

        // Inside a segment, repeat blocks just reference their left/above
        // neighbour rather than re-coding the mode.
        if (col  &&  labelings[i] == labelings[i-1])
            m = LEFT4X4;
        else if (row  &&  labelings[i] == labelings[i-4])
            m = ABOVE4X4;
        else
        {
            // the only time we should do costing for new motion vector or mode
            // is when we are on a new label  (jbb May 08, 2007)
            switch (m = this_mode)
            {
            case NEW4X4 :
                thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
                break;
            case LEFT4X4:
                *this_mv = col ? d[-1].bmi.mv.as_mv : vp8_left_bmi(mic, i)->mv.as_mv;
                break;
            case ABOVE4X4:
                *this_mv = row ? d[-4].bmi.mv.as_mv : vp8_above_bmi(mic, i, mis)->mv.as_mv;
                break;
            case ZERO4X4:
                this_mv->row = this_mv->col = 0;
                break;
            default:
                break;
            }

            if (m == ABOVE4X4)  // replace above with left if same
            {
                const MV mv = col ? d[-1].bmi.mv.as_mv : vp8_left_bmi(mic, i)->mv.as_mv;

                if (mv.row == this_mv->row  &&  mv.col == this_mv->col)
                    m = LEFT4X4;
            }

            cost = x->inter_bmode_costs[ m];
        }

        d->bmi.mode = m;
        d->bmi.mv.as_mv = *this_mv;

    }
    while (++i < 16);

    cost += thismvcost ;
    return cost;
}
980 static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
981 int which_label, ENTROPY_CONTEXT *ta,
982 ENTROPY_CONTEXT *tl)
984 int cost = 0;
985 int b;
986 MACROBLOCKD *x = &mb->e_mbd;
988 for (b = 0; b < 16; b++)
989 if (labels[ b] == which_label)
990 cost += cost_coeffs(mb, x->block + b, 3,
991 ta + vp8_block2above[b],
992 tl + vp8_block2left[b]);
994 return cost;
// Trial-encode (predict, subtract, transform, quantize) the 4x4 blocks
// belonging to segment 'which_label' and return their accumulated
// squared coefficient error.
static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels, int which_label, const vp8_encodemb_rtcd_vtable_t *rtcd)
{
    int i;
    unsigned int distortion = 0;

    for (i = 0; i < 16; i++)
    {
        if (labels[i] == which_label)
        {
            BLOCKD *bd = &x->e_mbd.block[i];
            BLOCK *be = &x->block[i];

            vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
            ENCODEMB_INVOKE(rtcd, subb)(be, bd, 16);
            x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);

            // set to 0 no way to account for 2nd order DC so discount
            //be->coeff[0] = 0;
            x->quantize_b(be, bd);

            distortion += ENCODEMB_INVOKE(rtcd, berr)(be->coeff, bd->dqcoeff);
        }
    }

    return distortion;
}
// For each macroblock split type, the index of the first 4x4 block in
// each segment; used to pick the block a segment's motion search starts
// from (unused trailing entries are zero).
unsigned char vp8_mbsplit_offset2[4][16] = {
    { 0,  8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0},
    { 0,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0},
    { 0,  2,  8, 10,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0},
    { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15}
};
// Right-shift applied to the best SAD before the "do a full search"
// check, per segmentation type — presumably normalising for segment
// size; confirm index order against the BLOCK_* enum.
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
// State threaded through the SPLITMV segmentation search: inputs that
// seed each vp8_rd_check_segment() call, and the best result found so
// far so a later segmentation can be compared and restored.
typedef struct
{
    MV *ref_mv;            // reference MV used for MV costing
    MV *mvp;               // predictor MV seeding the motion search

    int segment_rd;        // best RD cost so far (input acts as threshold)
    int segment_num;       // segmentation type of the best result
    int r;                 // rate of the best result
    int d;                 // distortion of the best result
    int segment_yrate;     // luma token rate of the best result
    B_PREDICTION_MODE modes[16];  // per-block modes of the best result
    int_mv mvs[16];               // per-block MVs of the best result
    unsigned char eobs[16];       // per-block end-of-block positions

    int mvthresh;          // threshold gating new-MV searches per label
    int *mdcounts;         // mode context counts for vp8_cost_mv_ref()

    MV sv_mvp[4];          // save 4 mvp from 8x8
    int sv_istep[2];       // save 2 initial step_param for 16x8/8x16

} BEST_SEG_INFO;
// Evaluate one SPLITMV segmentation type: for each segment label, search
// LEFT/ABOVE/ZERO/NEW 4x4 modes (running a motion search for NEW4X4),
// pick the best per label, and if the whole segmentation beats
// bsi->segment_rd record it into 'bsi'.
void vp8_rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
                          unsigned int segmentation)
{
    int i;
    int const *labels;
    int br = 0;
    int bd = 0;
    B_PREDICTION_MODE this_mode;

    int label_count;
    int this_segment_rd = 0;
    int label_mv_thresh;
    int rate = 0;
    int sbr = 0;
    int sbd = 0;
    int segmentyrate = 0;

    vp8_variance_fn_ptr_t *v_fn_ptr;

    // Three levels of entropy-context copies: t_above/t_left track the
    // committed state per label, *_b hold the best mode's state within a
    // label, *_s (below) are scratch per trial mode.
    ENTROPY_CONTEXT_PLANES t_above, t_left;
    ENTROPY_CONTEXT *ta;
    ENTROPY_CONTEXT *tl;
    ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
    ENTROPY_CONTEXT *ta_b;
    ENTROPY_CONTEXT *tl_b;

    vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;
    ta_b = (ENTROPY_CONTEXT *)&t_above_b;
    tl_b = (ENTROPY_CONTEXT *)&t_left_b;

    br = 0;
    bd = 0;

    v_fn_ptr = &cpi->fn_ptr[segmentation];
    labels = vp8_mbsplits[segmentation];
    label_count = vp8_mbsplit_count[segmentation];

    // 64 makes this threshold really big effectively
    // making it so that we very rarely check mvs on
    // segments.   setting this to 1 would make mv thresh
    // roughly equal to what it is for macroblocks
    label_mv_thresh = 1 * bsi->mvthresh / label_count;

    // Segmentation method overheads
    rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
    rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
    this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
    br += rate;

    for (i = 0; i < label_count; i++)
    {
        MV mode_mv[B_MODE_COUNT];
        int best_label_rd = INT_MAX;
        B_PREDICTION_MODE mode_selected = ZERO4X4;
        int bestlabelyrate = 0;

        // search for the best motion vector on this segment
        for (this_mode = LEFT4X4; this_mode <= NEW4X4; this_mode++)
        {
            int this_rd;
            int distortion;
            int labelyrate;
            ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
            ENTROPY_CONTEXT *ta_s;
            ENTROPY_CONTEXT *tl_s;

            vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
            vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));

            ta_s = (ENTROPY_CONTEXT *)&t_above_s;
            tl_s = (ENTROPY_CONTEXT *)&t_left_s;

            if (this_mode == NEW4X4)
            {
                int sseshift;
                int num00;
                int step_param = 0;
                int further_steps;
                int n;
                int thissme;
                int bestsme = INT_MAX;
                MV temp_mv;
                BLOCK *c;
                BLOCKD *e;

                // Is the best so far sufficiently good that we cant justify doing and new motion search.
                if (best_label_rd < label_mv_thresh)
                    break;

                if (cpi->compressor_speed)
                {
                    // Seed 16x8/8x16 searches from the saved 8x8 results.
                    if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8)
                    {
                        bsi->mvp = &bsi->sv_mvp[i];
                        if (i == 1 && segmentation == BLOCK_16X8) bsi->mvp = &bsi->sv_mvp[2];

                        step_param = bsi->sv_istep[i];
                    }

                    // use previous block's result as next block's MV predictor.
                    if (segmentation == BLOCK_4X4 && i > 0)
                    {
                        bsi->mvp = &(x->e_mbd.block[i-1].bmi.mv.as_mv);
                        if (i == 4 || i == 8 || i == 12) bsi->mvp = &(x->e_mbd.block[i-4].bmi.mv.as_mv);
                        step_param = 2;
                    }
                }

                further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;

                {
                    int sadpb = x->sadperbit4;

                    // find first label
                    n = vp8_mbsplit_offset2[segmentation][i];

                    c = &x->block[n];
                    e = &x->e_mbd.block[n];

                    if (cpi->sf.search_method == HEX)
                        bestsme = vp8_hex_search(x, c, e, bsi->ref_mv,
                                                 &mode_mv[NEW4X4], step_param, sadpb, &num00, v_fn_ptr, x->mvsadcost, x->mvcost, bsi->ref_mv);

                    else
                    {
                        bestsme = cpi->diamond_search_sad(x, c, e, bsi->mvp,
                                                          &mode_mv[NEW4X4], step_param,
                                                          sadpb / 2, &num00, v_fn_ptr, x->mvsadcost, x->mvcost, bsi->ref_mv);

                        // Refine with further diamond steps, skipping the
                        // step counts the previous search already covered.
                        n = num00;
                        num00 = 0;

                        while (n < further_steps)
                        {
                            n++;

                            if (num00)
                                num00--;
                            else
                            {
                                thissme = cpi->diamond_search_sad(x, c, e, bsi->mvp,
                                                                  &temp_mv, step_param + n,
                                                                  sadpb / 2, &num00, v_fn_ptr, x->mvsadcost, x->mvcost, bsi->ref_mv);

                                if (thissme < bestsme)
                                {
                                    bestsme = thissme;
                                    mode_mv[NEW4X4].row = temp_mv.row;
                                    mode_mv[NEW4X4].col = temp_mv.col;
                                }
                            }
                        }
                    }

                    sseshift = segmentation_to_sseshift[segmentation];

                    // Should we do a full search (best quality only)
                    if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
                    {
                        thissme = cpi->full_search_sad(x, c, e, bsi->mvp,
                                                       sadpb / 4, 16, v_fn_ptr, x->mvcost, x->mvsadcost, bsi->ref_mv);

                        if (thissme < bestsme)
                        {
                            bestsme = thissme;
                            mode_mv[NEW4X4] = e->bmi.mv.as_mv;
                        }
                        else
                        {
                            // The full search result is actually worse so re-instate the previous best vector
                            e->bmi.mv.as_mv = mode_mv[NEW4X4];
                        }
                    }
                }

                if (bestsme < INT_MAX)
                {
                    if (!cpi->common.full_pixel)
                        cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
                                                     bsi->ref_mv, x->errorperbit / 2, v_fn_ptr, x->mvcost);
                    else
                        vp8_skip_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
                                                    bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost);
                }
            } /* NEW4X4 */

            rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
                               bsi->ref_mv, x->mvcost);

            // Trap vectors that reach beyond the UMV borders
            if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
                ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
            {
                continue;
            }

            distortion = vp8_encode_inter_mb_segment(x, labels, i, IF_RTCD(&cpi->rtcd.encodemb)) / 4;

            labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
            rate += labelyrate;

            this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

            if (this_rd < best_label_rd)
            {
                sbr = rate;
                sbd = distortion;
                bestlabelyrate = labelyrate;
                mode_selected = this_mode;
                best_label_rd = this_rd;

                vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
                vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
            }
        } /*for each 4x4 mode*/

        // Commit the winning mode's entropy state and block modes/MVs.
        vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
        vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));

        labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
                    bsi->ref_mv, x->mvcost);

        br += sbr;
        bd += sbd;
        segmentyrate += bestlabelyrate;
        this_segment_rd += best_label_rd;

        if (this_segment_rd >= bsi->segment_rd)
            break;

    } /* for each label */

    if (this_segment_rd < bsi->segment_rd)
    {
        bsi->r = br;
        bsi->d = bd;
        bsi->segment_yrate = segmentyrate;
        bsi->segment_rd = this_segment_rd;
        bsi->segment_num = segmentation;

        // store everything needed to come back to this!!
        for (i = 0; i < 16; i++)
        {
            BLOCKD *bd = &x->e_mbd.block[i];

            bsi->mvs[i].as_mv = bd->bmi.mv.as_mv;
            bsi->modes[i] = bd->bmi.mode;
            bsi->eobs[i] = bd->eob;
        }
    }
}
1317 static __inline
1318 void vp8_cal_step_param(int sr, int *sp)
1320 int step = 0;
1322 if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP;
1323 else if (sr < 1) sr = 1;
1325 while (sr>>=1)
1326 step++;
1328 *sp = MAX_MVSEARCH_STEPS - 1 - step;
/* Pick the best SPLITMV sub-partitioning for this macroblock.
 *
 * Tries the candidate segmentations (16x8, 8x16, 8x8, 4x4) via
 * vp8_rd_check_segment(), which accumulates the winner into bsi.
 * In best-quality mode (compressor_speed == 0) all four are tried in a
 * fixed order; otherwise 8x8 is tried first and its per-block MVs seed
 * the predictors / search steps for the other partition sizes.
 *
 * best_rd acts as the initial rd bound (segmentations worse than it are
 * rejected inside vp8_rd_check_segment).  On return, the winning
 * per-block modes/MVs/eobs are copied into x->e_mbd and the partition
 * info is saved; returns the best segment rd cost found.
 */
1331 static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
1332 MV *best_ref_mv, int best_rd,
1333 int *mdcounts, int *returntotrate,
1334 int *returnyrate, int *returndistortion,
1335 int mvthresh)
1337 int i;
1338 BEST_SEG_INFO bsi;
1340 vpx_memset(&bsi, 0, sizeof(bsi));
/* Seed the search state: segment_rd is the rd value to beat. */
1342 bsi.segment_rd = best_rd;
1343 bsi.ref_mv = best_ref_mv;
1344 bsi.mvp = best_ref_mv;
1345 bsi.mvthresh = mvthresh;
1346 bsi.mdcounts = mdcounts;
1348 for(i = 0; i < 16; i++)
1350 bsi.modes[i] = ZERO4X4;
1353 if(cpi->compressor_speed == 0)
1355 /* for now, we will keep the original segmentation order
1356 when in best quality mode */
1357 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
1358 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
1359 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
1360 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
1362 else
1364 int sr;
1366 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
/* Only bother with the other partitionings if 8x8 already improved on best_rd. */
1368 if (bsi.segment_rd < best_rd)
1370 int col_min = (best_ref_mv->col - MAX_FULL_PEL_VAL) >>3;
1371 int col_max = (best_ref_mv->col + MAX_FULL_PEL_VAL) >>3;
1372 int row_min = (best_ref_mv->row - MAX_FULL_PEL_VAL) >>3;
1373 int row_max = (best_ref_mv->row + MAX_FULL_PEL_VAL) >>3;
/* Save the UMV window so it can be restored after the segment searches. */
1375 int tmp_col_min = x->mv_col_min;
1376 int tmp_col_max = x->mv_col_max;
1377 int tmp_row_min = x->mv_row_min;
1378 int tmp_row_max = x->mv_row_max;
1380 /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
1381 if (x->mv_col_min < col_min )
1382 x->mv_col_min = col_min;
1383 if (x->mv_col_max > col_max )
1384 x->mv_col_max = col_max;
1385 if (x->mv_row_min < row_min )
1386 x->mv_row_min = row_min;
1387 if (x->mv_row_max > row_max )
1388 x->mv_row_max = row_max;
1390 /* Get 8x8 result */
/* Corner blocks 0/2/8/10 of the 4x4 grid are the four 8x8 sub-block MVs. */
1391 bsi.sv_mvp[0] = bsi.mvs[0].as_mv;
1392 bsi.sv_mvp[1] = bsi.mvs[2].as_mv;
1393 bsi.sv_mvp[2] = bsi.mvs[8].as_mv;
1394 bsi.sv_mvp[3] = bsi.mvs[10].as_mv;
1396 /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
1397 /* block 8X16 */
/* sr = max component distance (full pels) between the two stacked 8x8 MVs. */
1399 sr = MAXF((abs(bsi.sv_mvp[0].row - bsi.sv_mvp[2].row))>>3, (abs(bsi.sv_mvp[0].col - bsi.sv_mvp[2].col))>>3);
1400 vp8_cal_step_param(sr, &bsi.sv_istep[0]);
1402 sr = MAXF((abs(bsi.sv_mvp[1].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[1].col - bsi.sv_mvp[3].col))>>3);
1403 vp8_cal_step_param(sr, &bsi.sv_istep[1]);
1405 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
1408 /* block 16X8 */
1410 sr = MAXF((abs(bsi.sv_mvp[0].row - bsi.sv_mvp[1].row))>>3, (abs(bsi.sv_mvp[0].col - bsi.sv_mvp[1].col))>>3);
1411 vp8_cal_step_param(sr, &bsi.sv_istep[0]);
1413 sr = MAXF((abs(bsi.sv_mvp[2].row - bsi.sv_mvp[3].row))>>3, (abs(bsi.sv_mvp[2].col - bsi.sv_mvp[3].col))>>3);
1414 vp8_cal_step_param(sr, &bsi.sv_istep[1]);
1416 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
1419 /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
1420 /* Not skip 4x4 if speed=0 (good quality) */
1421 if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
1423 bsi.mvp = &bsi.sv_mvp[0];
1424 vp8_rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
1427 /* restore UMV window */
1428 x->mv_col_min = tmp_col_min;
1429 x->mv_col_max = tmp_col_max;
1430 x->mv_row_min = tmp_row_min;
1431 x->mv_row_max = tmp_row_max;
1435 /* set it to the best */
/* Copy the winning per-4x4-block state back into the macroblock. */
1436 for (i = 0; i < 16; i++)
1438 BLOCKD *bd = &x->e_mbd.block[i];
1440 bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
1441 bd->bmi.mode = bsi.modes[i];
1442 bd->eob = bsi.eobs[i];
1445 *returntotrate = bsi.r;
1446 *returndistortion = bsi.d;
1447 *returnyrate = bsi.segment_yrate;
1449 /* save partitions */
1450 x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
1451 x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
1453 for (i = 0; i < x->partition_info->count; i++)
1455 int j;
/* j = index of the first 4x4 block belonging to partition i. */
1457 j = vp8_mbsplit_offset2[bsi.segment_num][i];
1459 x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
1460 x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
1463 return bsi.segment_rd;
1465 #endif
/* Exchange the integer values stored at x and y. */
static void swap(int *x, int *y)
{
    int hold = *x;

    *x = *y;
    *y = hold;
}
/* Sort arr[left..right] (inclusive) into ascending order.
 *
 * This is a quicksort variant in which the pivot is chosen as the
 * middle index and the pivot index is allowed to follow the element
 * whenever a swap moves it, so the recursion can split around its
 * final position.  Used to find the median of candidate MV components.
 */
static void quicksortmv(int arr[], int left, int right)
{
    int lo, hi, p;

    if (left >= right)
        return;

    lo = left;
    hi = right;
    p = (left + right) / 2;

    while (lo <= p && hi >= p)
    {
        /* Advance past elements already on the correct side. */
        while (arr[lo] < arr[p] && lo <= p)
            lo++;

        while (arr[hi] > arr[p] && hi >= p)
            hi--;

        swap(&arr[lo], &arr[hi]);
        lo++;
        hi--;

        /* If the pivot element itself was swapped, track its new home. */
        if (lo - 1 == p)
        {
            hi++;
            p = hi;
        }
        else if (hi + 1 == p)
        {
            lo--;
            p = lo;
        }
    }

    quicksortmv(arr, left, p - 1);
    quicksortmv(arr, p + 1, right);
}
/* Sort arr[left..right] (inclusive) into ascending order while applying
 * the identical permutation to the companion idx[] array, so idx ends up
 * listing the original positions in ascending-value order.
 *
 * Same pivot-following quicksort variant as quicksortmv(), extended with
 * the parallel index swap.  Used to rank neighbour SADs.
 */
static void quicksortsad(int arr[], int idx[], int left, int right)
{
    int lo, hi, p;

    if (left >= right)
        return;

    lo = left;
    hi = right;
    p = (left + right) / 2;

    while (lo <= p && hi >= p)
    {
        /* Advance past elements already on the correct side. */
        while (arr[lo] < arr[p] && lo <= p)
            lo++;

        while (arr[hi] > arr[p] && hi >= p)
            hi--;

        /* Keep the index array in lock-step with the values. */
        swap(&arr[lo], &arr[hi]);
        swap(&idx[lo], &idx[hi]);
        lo++;
        hi--;

        /* If the pivot element itself was swapped, track its new home. */
        if (lo - 1 == p)
        {
            hi++;
            p = hi;
        }
        else if (hi + 1 == p)
        {
            lo--;
            p = lo;
        }
    }

    quicksortsad(arr, idx, left, p - 1);
    quicksortsad(arr, idx, p + 1, right);
}
/* Build an improved motion-vector prediction (*mvp) for the NEWMV search.
 *
 * Collects up to 8 candidate MVs: 3 current-frame neighbours (above,
 * left, above-left) and, when the last frame was not a key frame,
 * 5 last-frame MVs (co-located, above, left, right, below) from the
 * cpi->lfmv store.  Candidates are examined in near_sadidx[] order
 * (expected to be SAD-ascending, as produced by vp8_cal_sad); the first
 * one whose reference frame matches the current block's is used and *sr
 * is set to a search-range hint (3 for the three lowest-SAD candidates,
 * 2 otherwise).  If none matches, the component-wise median of all
 * candidates is used and *sr is 0 (caller picks the range).  The result
 * is clamped to the valid MV window before returning.
 */
1549 //The improved MV prediction
1550 void vp8_mv_pred
1552 VP8_COMP *cpi,
1553 MACROBLOCKD *xd,
1554 const MODE_INFO *here,
1555 MV *mvp,
1556 int refframe,
1557 int *ref_frame_sign_bias,
1558 int *sr,
1559 int near_sadidx[]
1562 const MODE_INFO *above = here - xd->mode_info_stride;
1563 const MODE_INFO *left = here - 1;
1564 const MODE_INFO *aboveleft = above - 1;
1565 int_mv near_mvs[8];
1566 int near_ref[8];
1567 int_mv mv;
1568 int vcnt=0;
1569 int find=0;
1570 int mb_offset;
1572 int mvx[8];
1573 int mvy[8];
1574 int i;
/* Fallback prediction is the zero MV. */
1576 mv.as_int = 0;
1578 if(here->mbmi.ref_frame != INTRA_FRAME)
/* All 8 slots start as zero MV / ref 0; intra neighbours leave their
   slot at this initialisation (vcnt still advances past them). */
1580 near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
1581 near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;
1583 // read in 3 nearby block's MVs from current frame as prediction candidates.
1584 if (above->mbmi.ref_frame != INTRA_FRAME)
1586 near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
/* mv_bias flips the MV sign when the candidate's reference frame and
   refframe disagree on sign bias. */
1587 mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1588 near_ref[vcnt] = above->mbmi.ref_frame;
1590 vcnt++;
1591 if (left->mbmi.ref_frame != INTRA_FRAME)
1593 near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
1594 mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1595 near_ref[vcnt] = left->mbmi.ref_frame;
1597 vcnt++;
1598 if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
1600 near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
1601 mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1602 near_ref[vcnt] = aboveleft->mbmi.ref_frame;
1604 vcnt++;
1606 // read in 5 nearby block's MVs from last frame.
1607 if(cpi->common.last_frame_type != KEY_FRAME)
/* Index of the co-located MB in the last frame's lfmv/lf_ref_frame
   arrays; edges are in 1/8-pel units, /128 converts to MB units, and
   the +1s appear to skip a one-MB border — TODO confirm layout. */
1609 mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ;
1611 // current in last frame
1612 if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME)
1614 near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
1615 mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1616 near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
1618 vcnt++;
1620 // above in last frame
1621 if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME)
1623 near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int;
1624 mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride-1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1625 near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1];
1627 vcnt++;
1629 // left in last frame
1630 if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME)
1632 near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int;
1633 mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset -1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1634 near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
1636 vcnt++;
1638 // right in last frame
1639 if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME)
1641 near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int;
1642 mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1643 near_ref[vcnt] = cpi->lf_ref_frame[mb_offset +1];
1645 vcnt++;
1647 // below in last frame
1648 if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME)
1650 near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int;
1651 mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
1652 near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1];
1654 vcnt++;
/* Walk candidates in ascending-SAD order; first ref-frame match wins. */
1657 for(i=0; i< vcnt; i++)
1659 if(near_ref[near_sadidx[i]] != INTRA_FRAME)
1661 if(here->mbmi.ref_frame == near_ref[near_sadidx[i]])
1663 mv.as_int = near_mvs[near_sadidx[i]].as_int;
1664 find = 1;
/* A low-SAD match justifies a wider search range hint. */
1665 if (i < 3)
1666 *sr = 3;
1667 else
1668 *sr = 2;
1669 break;
/* No matching reference frame: fall back to the component-wise median
   of all collected candidates. */
1674 if(!find)
1676 for(i=0; i<vcnt; i++)
1678 mvx[i] = near_mvs[i].as_mv.row;
1679 mvy[i] = near_mvs[i].as_mv.col;
1682 quicksortmv (mvx, 0, vcnt-1);
1683 quicksortmv (mvy, 0, vcnt-1);
1684 mv.as_mv.row = mvx[vcnt/2];
1685 mv.as_mv.col = mvy[vcnt/2];
1687 find = 1;
1688 //sr is set to 0 to allow calling function to decide the search range.
1689 *sr = 0;
1693 /* Set up return values */
1694 *mvp = mv.as_mv;
1695 vp8_clamp_mv(mvp, xd);
/* Compute SADs between the source 16x16 macroblock and up to eight
 * neighbouring macroblocks, then sort near_sadidx[] so it lists the
 * candidate positions in ascending-SAD order (consumed by vp8_mv_pred).
 *
 * Indices 0-2 compare against current-frame reconstructed neighbours
 * (above, left, above-left); indices 3-7 compare against last-frame
 * positions (co-located, above, left, right, below) when the last frame
 * was not a key frame.  Unavailable positions (frame edges) get INT_MAX
 * so they sort last.  The 0x7fffffff passed to sdf() is the best-so-far
 * bound, i.e. effectively no early termination.
 */
1698 void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
1701 int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
1703 //calculate sad for current frame 3 nearby MBs.
1704 if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0)
/* Top-left corner MB: no current-frame neighbours at all. */
1706 near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
1707 }else if(xd->mb_to_top_edge==0)
1708 { //only has left MB for sad calculation.
1709 near_sad[0] = near_sad[2] = INT_MAX;
1710 near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
1711 }else if(xd->mb_to_left_edge ==0)
1712 { //only has above MB for sad calculation.
1713 near_sad[1] = near_sad[2] = INT_MAX;
1714 near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
1715 }else
/* Interior MB: above, left and above-left are all available. */
1717 near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
1718 near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
1719 near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, xd->dst.y_buffer - xd->dst.y_stride *16 -16,xd->dst.y_stride, 0x7fffffff);
1722 if(cpi->common.last_frame_type != KEY_FRAME)
1724 //calculate sad for last frame 5 nearby MBs.
1725 unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset;
1726 int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride;
/* Mark last-frame neighbours that fall outside the frame. */
1728 if(xd->mb_to_top_edge==0) near_sad[4] = INT_MAX;
1729 if(xd->mb_to_left_edge ==0) near_sad[5] = INT_MAX;
1730 if(xd->mb_to_right_edge ==0) near_sad[6] = INT_MAX;
1731 if(xd->mb_to_bottom_edge==0) near_sad[7] = INT_MAX;
1733 if(near_sad[4] != INT_MAX)
1734 near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, pre_y_buffer - pre_y_stride *16, pre_y_stride, 0x7fffffff);
1735 if(near_sad[5] != INT_MAX)
1736 near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, pre_y_buffer - 16, pre_y_stride, 0x7fffffff);
/* Co-located MB in the last frame is always available. */
1737 near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, pre_y_buffer, pre_y_stride, 0x7fffffff);
1738 if(near_sad[6] != INT_MAX)
1739 near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, pre_y_buffer + 16, pre_y_stride, 0x7fffffff);
1740 if(near_sad[7] != INT_MAX)
1741 near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(x->src.y_buffer, x->src.y_stride, pre_y_buffer + pre_y_stride *16, pre_y_stride, 0x7fffffff);
/* Sort only the slots that were computed: all 8 when last-frame
   candidates exist, otherwise just the 3 current-frame ones. */
1744 if(cpi->common.last_frame_type != KEY_FRAME)
1746 quicksortsad(near_sad, near_sadidx, 0, 7);
1747 }else
1749 quicksortsad(near_sad, near_sadidx, 0, 2);
1753 #if !(CONFIG_REALTIME_ONLY)
1754 int vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
1756 BLOCK *b = &x->block[0];
1757 BLOCKD *d = &x->e_mbd.block[0];
1758 MACROBLOCKD *xd = &x->e_mbd;
1759 B_MODE_INFO best_bmodes[16];
1760 MB_MODE_INFO best_mbmode;
1761 PARTITION_INFO best_partition;
1762 MV best_ref_mv;
1763 MV mode_mv[MB_MODE_COUNT];
1764 MB_PREDICTION_MODE this_mode;
1765 int num00;
1766 int best_mode_index = 0;
1768 int i;
1769 int mode_index;
1770 int mdcounts[4];
1771 int rate;
1772 int distortion;
1773 int best_rd = INT_MAX; // 1 << 30;
1774 int ref_frame_cost[MAX_REF_FRAMES];
1775 int rate2, distortion2;
1776 int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
1777 int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
1778 int distortion_uv;
1779 int best_yrd = INT_MAX;
1781 //int all_rds[MAX_MODES]; // Experimental debug code.
1782 //int all_rates[MAX_MODES];
1783 //int all_dist[MAX_MODES];
1784 //int intermodecost[MAX_MODES];
1786 MB_PREDICTION_MODE uv_intra_mode;
1788 int force_no_skip = 0;
1790 MV mvp;
1791 int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
1792 int saddone=0;
1793 int sr=0; //search range got from mv_pred(). It uses step_param levels. (0-7)
1795 MV frame_nearest_mv[4];
1796 MV frame_near_mv[4];
1797 MV frame_best_ref_mv[4];
1798 int frame_mdcounts[4][4];
1799 int frame_lf_or_gf[4];
1800 unsigned char *y_buffer[4];
1801 unsigned char *u_buffer[4];
1802 unsigned char *v_buffer[4];
1804 vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
1806 if (cpi->ref_frame_flags & VP8_LAST_FLAG)
1808 YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
1810 vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &frame_nearest_mv[LAST_FRAME], &frame_near_mv[LAST_FRAME],
1811 &frame_best_ref_mv[LAST_FRAME], frame_mdcounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);
1813 y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
1814 u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
1815 v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
1817 frame_lf_or_gf[LAST_FRAME] = 0;
1820 if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
1822 YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];
1824 vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &frame_nearest_mv[GOLDEN_FRAME], &frame_near_mv[GOLDEN_FRAME],
1825 &frame_best_ref_mv[GOLDEN_FRAME], frame_mdcounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);
1827 y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
1828 u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
1829 v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
1831 frame_lf_or_gf[GOLDEN_FRAME] = 1;
1834 if (cpi->ref_frame_flags & VP8_ALT_FLAG)
1836 YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];
1838 vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &frame_nearest_mv[ALTREF_FRAME], &frame_near_mv[ALTREF_FRAME],
1839 &frame_best_ref_mv[ALTREF_FRAME], frame_mdcounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);
1841 y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
1842 u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
1843 v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
1845 frame_lf_or_gf[ALTREF_FRAME] = 1;
1848 *returnintra = INT_MAX;
1849 cpi->mbs_tested_so_far++; // Count of the number of MBs tested so far this frame
1851 x->skip = 0;
1853 ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
1855 // Special case treatment when GF and ARF are not sensible options for reference
1856 if (cpi->ref_frame_flags == VP8_LAST_FLAG)
1858 ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1859 + vp8_cost_zero(255);
1860 ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1861 + vp8_cost_one(255)
1862 + vp8_cost_zero(128);
1863 ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1864 + vp8_cost_one(255)
1865 + vp8_cost_one(128);
1867 else
1869 ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1870 + vp8_cost_zero(cpi->prob_last_coded);
1871 ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1872 + vp8_cost_one(cpi->prob_last_coded)
1873 + vp8_cost_zero(cpi->prob_gf_coded);
1874 ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
1875 + vp8_cost_one(cpi->prob_last_coded)
1876 + vp8_cost_one(cpi->prob_gf_coded);
1879 vpx_memset(mode_mv, 0, sizeof(mode_mv));
1881 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
1882 vp8_rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate, &uv_intra_rate_tokenonly, &uv_intra_distortion);
1883 uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
1885 for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
1887 int this_rd = INT_MAX;
1888 int lf_or_gf = 0; // Lat Frame (01) or gf/arf (1)
1889 int disable_skip = 0;
1890 int other_cost = 0;
1892 force_no_skip = 0;
1894 // Experimental debug code.
1895 // Record of rd values recorded for this MB. -1 indicates not measured
1896 //all_rds[mode_index] = -1;
1897 //all_rates[mode_index] = -1;
1898 //all_dist[mode_index] = -1;
1899 //intermodecost[mode_index] = -1;
1901 // Test best rd so far against threshold for trying this mode.
1902 if (best_rd <= cpi->rd_threshes[mode_index])
1903 continue;
1905 // These variables hold are rolling total cost and distortion for this mode
1906 rate2 = 0;
1907 distortion2 = 0;
1909 this_mode = vp8_mode_order[mode_index];
1911 x->e_mbd.mode_info_context->mbmi.mode = this_mode;
1912 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
1913 x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];
1915 // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
1916 // unless ARNR filtering is enabled in which case we want
1917 // an unfiltered alternative
1918 if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
1920 if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
1921 continue;
1924 /* everything but intra */
1925 if (x->e_mbd.mode_info_context->mbmi.ref_frame)
1927 x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
1928 x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
1929 x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
1930 mode_mv[NEARESTMV] = frame_nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
1931 mode_mv[NEARMV] = frame_near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
1932 best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
1933 vpx_memcpy(mdcounts, frame_mdcounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
1934 lf_or_gf = frame_lf_or_gf[x->e_mbd.mode_info_context->mbmi.ref_frame];
1937 if(x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
1939 if(!saddone)
1941 vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] );
1942 saddone = 1;
1945 vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
1946 x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
1948 /* adjust mvp to make sure it is within MV range */
1949 if(mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
1950 mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
1951 else if(mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
1952 mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;
1953 if(mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
1954 mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
1955 else if(mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
1956 mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
1959 // Check to see if the testing frequency for this mode is at its max
1960 // If so then prevent it from being tested and increase the threshold for its testing
1961 if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
1963 if (cpi->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index])
1965 // Increase the threshold for coding this mode to make it less likely to be chosen
1966 cpi->rd_thresh_mult[mode_index] += 4;
1968 if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
1969 cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
1971 cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
1973 continue;
1977 // We have now reached the point where we are going to test the current mode so increment the counter for the number of times it has been tested
1978 cpi->mode_test_hit_counts[mode_index] ++;
1980 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
1981 if (cpi->zbin_mode_boost_enabled)
1983 if ( vp8_ref_frame_order[mode_index] == INTRA_FRAME )
1984 cpi->zbin_mode_boost = 0;
1985 else
1987 if (vp8_mode_order[mode_index] == ZEROMV)
1989 if (vp8_ref_frame_order[mode_index] != LAST_FRAME)
1990 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1991 else
1992 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
1994 else if (vp8_ref_frame_order[mode_index] == SPLITMV)
1995 cpi->zbin_mode_boost = 0;
1996 else
1997 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
2000 vp8_update_zbin_extra(cpi, x);
2003 switch (this_mode)
2005 case B_PRED:
2007 int tmp_rd;
2009 // Note the rate value returned here includes the cost of coding the BPRED mode : x->mbmode_cost[x->e_mbd.frame_type][BPRED];
2010 tmp_rd = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y, &distortion, best_yrd);
2011 rate2 += rate;
2012 distortion2 += distortion;
2014 if(tmp_rd < best_yrd)
2016 rate2 += uv_intra_rate;
2017 rate_uv = uv_intra_rate_tokenonly;
2018 distortion2 += uv_intra_distortion;
2019 distortion_uv = uv_intra_distortion;
2021 else
2023 this_rd = INT_MAX;
2024 disable_skip = 1;
2027 break;
2029 case SPLITMV:
2031 int tmp_rd;
2032 int this_rd_thresh;
2034 this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
2035 this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
2037 tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
2038 best_yrd, mdcounts,
2039 &rate, &rate_y, &distortion, this_rd_thresh) ;
2041 rate2 += rate;
2042 distortion2 += distortion;
2044 // If even the 'Y' rd value of split is higher than best so far then dont bother looking at UV
2045 if (tmp_rd < best_yrd)
2047 // Now work out UV cost and add it in
2048 vp8_rd_inter_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
2049 rate2 += rate_uv;
2050 distortion2 += distortion_uv;
2052 else
2054 this_rd = INT_MAX;
2055 disable_skip = 1;
2058 break;
2059 case DC_PRED:
2060 case V_PRED:
2061 case H_PRED:
2062 case TM_PRED:
2063 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
2064 vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
2065 macro_block_yrd(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd.encodemb)) ;
2066 rate2 += rate_y;
2067 distortion2 += distortion;
2068 rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
2069 rate2 += uv_intra_rate;
2070 rate_uv = uv_intra_rate_tokenonly;
2071 distortion2 += uv_intra_distortion;
2072 distortion_uv = uv_intra_distortion;
2073 break;
2075 case NEWMV:
2077 // Decrement full search counter
2078 if (cpi->check_freq[lf_or_gf] > 0)
2079 cpi->check_freq[lf_or_gf] --;
2082 int thissme;
2083 int bestsme = INT_MAX;
2084 int step_param = cpi->sf.first_step;
2085 int search_range;
2086 int further_steps;
2087 int n;
2089 int col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >>3;
2090 int col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >>3;
2091 int row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >>3;
2092 int row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >>3;
2094 int tmp_col_min = x->mv_col_min;
2095 int tmp_col_max = x->mv_col_max;
2096 int tmp_row_min = x->mv_row_min;
2097 int tmp_row_max = x->mv_row_max;
2099 // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
2100 if (x->mv_col_min < col_min )
2101 x->mv_col_min = col_min;
2102 if (x->mv_col_max > col_max )
2103 x->mv_col_max = col_max;
2104 if (x->mv_row_min < row_min )
2105 x->mv_row_min = row_min;
2106 if (x->mv_row_max > row_max )
2107 x->mv_row_max = row_max;
2109 //adjust search range according to sr from mv prediction
2110 if(sr > step_param)
2111 step_param = sr;
2113 // Work out how long a search we should do
2114 search_range = MAXF(abs(best_ref_mv.col), abs(best_ref_mv.row)) >> 3;
2116 if (search_range >= x->vector_range)
2117 x->vector_range = search_range;
2118 else if (x->vector_range > cpi->sf.min_fs_radius)
2119 x->vector_range--;
2121 // Initial step/diamond search
2123 int sadpb = x->sadperbit16;
2125 if (cpi->sf.search_method == HEX)
2127 bestsme = vp8_hex_search(x, b, d, &best_ref_mv, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
2128 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
2129 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
2131 else
2133 bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb < 9
2134 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
2135 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
2137 // Further step/diamond searches as necessary
2138 n = 0;
2139 further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
2141 n = num00;
2142 num00 = 0;
2144 while (n < further_steps)
2146 n++;
2148 if (num00)
2149 num00--;
2150 else
2152 thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb = 9
2154 if (thissme < bestsme)
2156 bestsme = thissme;
2157 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
2158 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
2160 else
2162 d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
2163 d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
2171 // Should we do a full search
2172 if (!cpi->check_freq[lf_or_gf] || cpi->do_full[lf_or_gf])
2174 int thissme;
2175 int full_flag_thresh = 0;
2176 MV full_mvp;
2178 full_mvp.row = d->bmi.mv.as_mv.row <<3; // use diamond search result as full search staring point
2179 full_mvp.col = d->bmi.mv.as_mv.col <<3;
2181 // Update x->vector_range based on best vector found in step search
2182 search_range = MAXF(abs((mvp.row>>3) - d->bmi.mv.as_mv.row), abs((mvp.col>>3) - d->bmi.mv.as_mv.col));
2183 //search_range *= 1.4; //didn't improve PSNR
2185 if (search_range > x->vector_range)
2186 x->vector_range = search_range;
2187 else
2188 search_range = x->vector_range;
2190 // Apply limits
2191 search_range = (search_range > cpi->sf.max_fs_radius) ? cpi->sf.max_fs_radius : search_range;
2193 //add this to reduce full search range.
2194 if(sr<=3 && search_range > 8) search_range = 8;
2197 int sadpb = x->sadperbit16 >> 2;
2198 thissme = cpi->full_search_sad(x, b, d, &full_mvp, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, x->mvsadcost,&best_ref_mv);
2201 // Barrier threshold to initiating full search
2202 // full_flag_thresh = 10 + (thissme >> 7);
2203 if ((thissme + full_flag_thresh) < bestsme)
2205 cpi->do_full[lf_or_gf] ++;
2206 bestsme = thissme;
2208 else if (thissme < bestsme)
2209 bestsme = thissme;
2210 else
2212 cpi->do_full[lf_or_gf] = cpi->do_full[lf_or_gf] >> 1;
2213 cpi->check_freq[lf_or_gf] = cpi->sf.full_freq[lf_or_gf];
2215 // The full search result is actually worse so re-instate the previous best vector
2216 d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
2217 d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
2221 x->mv_col_min = tmp_col_min;
2222 x->mv_col_max = tmp_col_max;
2223 x->mv_row_min = tmp_row_min;
2224 x->mv_row_max = tmp_row_max;
2226 if (bestsme < INT_MAX)
2227 // cpi->find_fractional_mv_step(x,b,d,&d->bmi.mv.as_mv,&best_ref_mv,x->errorperbit/2,cpi->fn_ptr.svf,cpi->fn_ptr.vf,x->mvcost); // normal mvc=11
2228 cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit / 4, &cpi->fn_ptr[BLOCK_16X16], x->mvcost);
2230 mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
2231 mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
2233 // Add the new motion vector cost to our rolling cost variable
2234 rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
2238 case NEARESTMV:
2239 case NEARMV:
2241 // Clip "next_nearest" so that it does not extend to far out of image
2242 if (mode_mv[this_mode].col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
2243 mode_mv[this_mode].col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
2244 else if (mode_mv[this_mode].col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
2245 mode_mv[this_mode].col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
2247 if (mode_mv[this_mode].row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
2248 mode_mv[this_mode].row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
2249 else if (mode_mv[this_mode].row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
2250 mode_mv[this_mode].row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
2252 // Do not bother proceeding if the vector (from newmv,nearest or near) is 0,0 as this should then be coded using the zeromv mode.
2253 if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
2254 ((mode_mv[this_mode].row == 0) && (mode_mv[this_mode].col == 0)))
2255 continue;
2257 case ZEROMV:
2259 mv_selected:
2261 // Trap vectors that reach beyond the UMV borders
2262 // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
2263 // because of the lack of break statements in the previous two cases.
2264 if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
2265 ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
2266 continue;
2268 vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
2269 vp8_build_inter_predictors_mby(&x->e_mbd);
2271 if (cpi->active_map_enabled && x->active_ptr[0] == 0) {
2272 x->skip = 1;
2274 else if (x->encode_breakout)
2276 int sum, sse;
2277 int threshold = (xd->block[0].dequant[1]
2278 * xd->block[0].dequant[1] >>4);
2280 if(threshold < x->encode_breakout)
2281 threshold = x->encode_breakout;
2283 VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)
2284 (x->src.y_buffer, x->src.y_stride,
2285 x->e_mbd.predictor, 16, (unsigned int *)(&sse), &sum);
2287 if (sse < threshold)
2289 // Check u and v to make sure skip is ok
2290 int sse2 = 0;
2291 /* If theres is no codeable 2nd order dc
2292 or a very small uniform pixel change change */
2293 if (abs(sum) < (xd->block[24].dequant[0]<<2)||
2294 ((sum * sum>>8) > sse && abs(sum) <128))
2296 sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
2298 if (sse2 * 2 < threshold)
2300 x->skip = 1;
2301 distortion2 = sse + sse2;
2302 rate2 = 500;
2304 /* for best_yrd calculation */
2305 rate_uv = 0;
2306 distortion_uv = sse2;
2308 disable_skip = 1;
2309 this_rd = RDCOST(x->rdmult, x->rddiv, rate2,
2310 distortion2);
2312 break;
2319 //intermodecost[mode_index] = vp8_cost_mv_ref(this_mode, mdcounts); // Experimental debug code
2321 // Add in the Mv/mode cost
2322 rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
2324 // Y cost and distortion
2325 macro_block_yrd(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd.encodemb));
2326 rate2 += rate_y;
2327 distortion2 += distortion;
2329 // UV cost and distortion
2330 vp8_rd_inter_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
2331 rate2 += rate_uv;
2332 distortion2 += distortion_uv;
2333 break;
2335 default:
2336 break;
2339 // Where skip is allowable add in the default per mb cost for the no skip case.
2340 // where we then decide to skip we have to delete this and replace it with the
2341 // cost of signalling a skip
2342 if (cpi->common.mb_no_coeff_skip)
2344 other_cost += vp8_cost_bit(cpi->prob_skip_false, 0);
2345 rate2 += other_cost;
2348 // Estimate the reference frame signaling cost and add it to the rolling cost variable.
2349 rate2 += ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
2351 if (!disable_skip)
2353 // Test for the condition where skip block will be activated because there are no non zero coefficients and make any necessary adjustment for rate
2354 if (cpi->common.mb_no_coeff_skip)
2356 int tteob;
2358 tteob = 0;
2360 for (i = 0; i <= 24; i++)
2362 tteob += x->e_mbd.block[i].eob;
2365 if (tteob == 0)
2367 rate2 -= (rate_y + rate_uv);
2368 //for best_yrd calculation
2369 rate_uv = 0;
2371 // Back out no skip flag costing and add in skip flag costing
2372 if (cpi->prob_skip_false)
2374 int prob_skip_cost;
2376 prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 1);
2377 prob_skip_cost -= vp8_cost_bit(cpi->prob_skip_false, 0);
2378 rate2 += prob_skip_cost;
2379 other_cost += prob_skip_cost;
2383 // Calculate the final RD estimate for this mode
2384 this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
2387 // Experimental debug code.
2388 //all_rds[mode_index] = this_rd;
2389 //all_rates[mode_index] = rate2;
2390 //all_dist[mode_index] = distortion2;
2392 if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) && (this_rd < *returnintra))
2394 *returnintra = this_rd ;
2397 // Did this mode help.. i.e. is it the new best mode
2398 if (this_rd < best_rd || x->skip)
2400 // Note index of best mode so far
2401 best_mode_index = mode_index;
2402 x->e_mbd.mode_info_context->mbmi.force_no_skip = force_no_skip;
2404 if (this_mode <= B_PRED)
2406 x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode;
2409 other_cost += ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
2411 /* Calculate the final y RD estimate for this mode */
2412 best_yrd = RDCOST(x->rdmult, x->rddiv, (rate2-rate_uv-other_cost),
2413 (distortion2-distortion_uv));
2415 *returnrate = rate2;
2416 *returndistortion = distortion2;
2417 best_rd = this_rd;
2418 vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
2419 vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
2421 for (i = 0; i < 16; i++)
2423 vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
2426 // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
2427 cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
2428 cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
2431 // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
2432 else
2434 cpi->rd_thresh_mult[mode_index] += 4;
2436 if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
2437 cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
2439 cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
2442 if (x->skip)
2443 break;
2447 // Reduce the activation RD thresholds for the best choice mode
2448 if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
2450 int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
2452 cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
2453 cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
2455 // If we chose a split mode then reset the new MV thresholds as well
2456 /*if ( vp8_mode_order[best_mode_index] == SPLITMV )
2458 best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWMV] >> 4);
2459 cpi->rd_thresh_mult[THR_NEWMV] = (cpi->rd_thresh_mult[THR_NEWMV] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWMV]-best_adjustment: MIN_THRESHMULT;
2460 cpi->rd_threshes[THR_NEWMV] = (cpi->rd_baseline_thresh[THR_NEWMV] >> 7) * cpi->rd_thresh_mult[THR_NEWMV];
2462 best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWG] >> 4);
2463 cpi->rd_thresh_mult[THR_NEWG] = (cpi->rd_thresh_mult[THR_NEWG] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWG]-best_adjustment: MIN_THRESHMULT;
2464 cpi->rd_threshes[THR_NEWG] = (cpi->rd_baseline_thresh[THR_NEWG] >> 7) * cpi->rd_thresh_mult[THR_NEWG];
2466 best_adjustment = 4; //(cpi->rd_thresh_mult[THR_NEWA] >> 4);
2467 cpi->rd_thresh_mult[THR_NEWA] = (cpi->rd_thresh_mult[THR_NEWA] >= (MIN_THRESHMULT+best_adjustment)) ? cpi->rd_thresh_mult[THR_NEWA]-best_adjustment: MIN_THRESHMULT;
2468 cpi->rd_threshes[THR_NEWA] = (cpi->rd_baseline_thresh[THR_NEWA] >> 7) * cpi->rd_thresh_mult[THR_NEWA];
2473 // If we have chosen new mv or split then decay the full search check count more quickly.
2474 if ((vp8_mode_order[best_mode_index] == NEWMV) || (vp8_mode_order[best_mode_index] == SPLITMV))
2476 int lf_or_gf = (vp8_ref_frame_order[best_mode_index] == LAST_FRAME) ? 0 : 1;
2478 if (cpi->check_freq[lf_or_gf] && !cpi->do_full[lf_or_gf])
2480 cpi->check_freq[lf_or_gf] --;
2484 // Keep a record of best mode index that we chose
2485 cpi->last_best_mode_index = best_mode_index;
2487 // Note how often each mode chosen as best
2488 cpi->mode_chosen_counts[best_mode_index] ++;
2491 if (cpi->is_src_frame_alt_ref && (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
2493 best_mbmode.mode = ZEROMV;
2494 best_mbmode.ref_frame = ALTREF_FRAME;
2495 best_mbmode.mv.as_int = 0;
2496 best_mbmode.uv_mode = 0;
2497 best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
2498 best_mbmode.partitioning = 0;
2499 best_mbmode.dc_diff = 0;
2501 vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
2502 vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
2504 for (i = 0; i < 16; i++)
2506 vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
2509 x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
2511 return best_rd;
2515 if(best_mbmode.mode <= B_PRED)
2517 int i;
2518 for (i = 0; i < 16; i++)
2520 best_bmodes[i].mv.as_int = 0;
2524 // macroblock modes
2525 vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
2526 vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
2528 for (i = 0; i < 16; i++)
2530 vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
2533 x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
2535 return best_rd;
2537 #endif