/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <limits.h>
#include "vpx_ports/config.h"
#include "onyx_int.h"
#include "modecosts.h"
#include "encodeintra.h"
#include "entropymode.h"
#include "pickinter.h"
#include "findnearmv.h"
#include "encodemb.h"
#include "reconinter.h"
#include "reconintra.h"
#include "reconintra4x4.h"
#include "variance.h"
#include "mcomp.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);

extern unsigned int cnt_pm;

extern const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES];
extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];

extern unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    (void) mb;
    (void) b;
    (void) d;
    (void) ref_mv;
    (void) error_per_bit;
    (void) vfp;
    (void) mvcost;

    // No sub-pixel refinement: convert the full-pel result to eighth-pel units.
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    return 0;
}
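/*
 * Note on the MV arithmetic in get_inter_mbpred_error() below: the motion
 * vector is stored in eighth-pel units, so the low three bits (mv & 7)
 * select the sub-pixel interpolation offset and the remaining bits
 * (mv >> 3) the full-pixel displacement. For example, col = 13 decomposes
 * into 1 full pixel plus sub-pixel position 5. When either offset is
 * nonzero the sub-pixel variance function (svf) is used; otherwise the
 * cheaper whole-pixel variance function (vf).
 */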
static int get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp, unsigned int *sse)
{
    BLOCK *b = &mb->block[0];
    BLOCKD *d = &mb->e_mbd.block[0];
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what = *(d->base_pre) + d->pre;
    int in_what_stride = d->pre_stride;
    int xoffset = d->bmi.mv.as_mv.col & 7;
    int yoffset = d->bmi.mv.as_mv.row & 7;

    in_what += (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (xoffset | yoffset)
    {
        return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse);
    }
    else
    {
        return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
    }
}
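/*
 * vp8_get16x16pred_error_c() below returns the sum of squared differences
 * over the 256 luma pixels with the squared mean removed:
 * pred_error = sum(diff^2) - (sum(diff))^2 / 256, i.e. 256 times the
 * variance of the residual, so a uniform brightness offset between source
 * and prediction costs nothing here.
 */
#if 0
/* Not built: a minimal sketch of that DC-invariance property. The
 * function name and the test values are chosen for this sketch only.
 */
static void pred_error_dc_offset_sketch(void)
{
    unsigned char src[16 * 16], ref[16 * 16];
    unsigned int e;
    int i;

    /* Source equals the prediction plus a constant offset of 4. */
    for (i = 0; i < 16 * 16; i++)
    {
        ref[i] = 100;
        src[i] = 104;
    }

    /* diff sum = 1024, sse = 4096: 4096 - (1024 * 1024) / 256 == 0 */
    e = vp8_get16x16pred_error_c(src, 16, ref, 16);
    (void) e;
}
#endif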
unsigned int vp8_get16x16pred_error_c
(
    const unsigned char *src_ptr,
    int src_stride,
    const unsigned char *ref_ptr,
    int ref_stride
)
{
    unsigned pred_error = 0;
    int i, j;
    int sum = 0;
    int diff;

    for (i = 0; i < 16; i++)
    {
        for (j = 0; j < 16; j++)
        {
            diff = src_ptr[j] - ref_ptr[j];
            sum += diff;
            pred_error += diff * diff;
        }

        src_ptr += src_stride;
        ref_ptr += ref_stride;
    }

    pred_error -= sum * sum / 256;
    return pred_error;
}
unsigned int vp8_get4x4sse_cs_c
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride
)
{
    int distortion = 0;
    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            int diff = src_ptr[c] - ref_ptr[c];
            distortion += diff * diff;
        }

        src_ptr += source_stride;
        ref_ptr += recon_stride;
    }

    return distortion;
}
static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
{
    unsigned char *sptr;
    unsigned char *dptr;

    sptr = (*(be->base_src) + be->src);
    dptr = b->predictor;

    return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff);
}
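/*
 * RD_ESTIMATE() (defined elsewhere in the encoder) folds a rate estimate
 * and an SSE distortion into a single scalar cost using the macroblock's
 * rdmult/rddiv Lagrangian weights; the pickers below simply keep the
 * candidate with the smallest such cost.
 */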
static int pick_intra4x4block(
    const VP8_ENCODER_RTCD *rtcd,
    MACROBLOCK *x,
    BLOCK *be,
    BLOCKD *b,
    B_PREDICTION_MODE *best_mode,
    B_PREDICTION_MODE above,
    B_PREDICTION_MODE left,
    int *bestrate,
    int *bestdistortion)
{
    B_PREDICTION_MODE mode;
    int best_rd = INT_MAX;       // 1<<30
    int rate;
    int distortion;
    unsigned int *mode_costs;

    if (x->e_mbd.frame_type == KEY_FRAME)
    {
        mode_costs = x->bmode_costs[above][left];
    }
    else
    {
        mode_costs = x->inter_bmode_costs;
    }

    for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
    {
        int this_rd;

        rate = mode_costs[mode];
        vp8_predict_intra4x4(b, mode, b->predictor);
        distortion = get_prediction_error(be, b, &rtcd->variance);
        this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            *bestrate = rate;
            *bestdistortion = distortion;
            best_rd = this_rd;
            *best_mode = mode;
        }
    }

    b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
    vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
    return best_rd;
}
int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist)
{
    MACROBLOCKD *const xd = &mb->e_mbd;
    int i;
    int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
    int error;
    int distortion = 0;

    vp8_intra_prediction_down_copy(xd);

    for (i = 0; i < 16; i++)
    {
        MODE_INFO *const mic = xd->mode_info_context;
        const int mis = xd->mode_info_stride;
        const B_PREDICTION_MODE A = vp8_above_bmi(mic, i, mis)->mode;
        const B_PREDICTION_MODE L = vp8_left_bmi(mic, i)->mode;
        B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
        int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);

        pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i,
                           &best_mode, A, L, &r, &d);

        cost += r;
        distortion += d;

        mic->bmi[i].mode = xd->block[i].bmi.mode = best_mode;

        // Break out case where we have already exceeded best so far value that was passed in
        if (distortion > *best_dist)
            break;
    }

    {
        // Inner scope so the outer i still records where the loop stopped.
        int i;

        for (i = 0; i < 16; i++)
            xd->block[i].bmi.mv.as_int = 0;
    }

    *Rate = cost;

    if (i == 16)
    {
        *best_dist = distortion;
        error = RD_ESTIMATE(mb->rdmult, mb->rddiv, cost, distortion);
    }
    else
    {
        *best_dist = INT_MAX;
        error = INT_MAX;
    }

    return error;
}
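/*
 * Chroma mode selection: score all four 8x8 chroma prediction modes
 * (DC_PRED, V_PRED, H_PRED, TM_PRED) by accumulating squared error for
 * U and V in a single pass over the source block, then keep the mode
 * with the lowest total. No rate term is used here.
 */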
int vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
{
    MACROBLOCKD *x = &mb->e_mbd;
    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
    unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
    unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
    int uvsrc_stride = mb->block[16].src_stride;
    unsigned char uleft_col[8];
    unsigned char vleft_col[8];
    unsigned char utop_left = uabove_row[-1];
    unsigned char vtop_left = vabove_row[-1];
    int i, j;
    int expected_udc;
    int expected_vdc;
    int shift;
    int Uaverage = 0;
    int Vaverage = 0;
    int diff;
    int pred_error[4] = {0, 0, 0, 0}, best_error = INT_MAX;
    MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);

    for (i = 0; i < 8; i++)
    {
        uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
        vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
    }

    if (!x->up_available && !x->left_available)
    {
        expected_udc = 128;
        expected_vdc = 128;
    }
    else
    {
        shift = 2;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }

            shift++;
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }

            shift++;
        }

        expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
        expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
    }
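    /*
     * Worked example for the rounded average above: with both the above
     * row and the left column available, shift = 2 + 1 + 1 = 4 and 16
     * samples were summed per plane, so e.g. Uaverage = 2040 gives
     * expected_udc = (2040 + 8) >> 4 = 128.
     */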
    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            int predu = uleft_col[i] + uabove_row[j] - utop_left;
            int predv = vleft_col[i] + vabove_row[j] - vtop_left;
            int u_p = usrc_ptr[j];
            int v_p = vsrc_ptr[j];

            if (predu > 255)
                predu = 255;

            if (predu < 0)
                predu = 0;

            if (predv > 255)
                predv = 255;

            if (predv < 0)
                predv = 0;

            diff = u_p - expected_udc;
            pred_error[DC_PRED] += diff * diff;
            diff = v_p - expected_vdc;
            pred_error[DC_PRED] += diff * diff;

            diff = u_p - uabove_row[j];
            pred_error[V_PRED] += diff * diff;
            diff = v_p - vabove_row[j];
            pred_error[V_PRED] += diff * diff;

            diff = u_p - uleft_col[i];
            pred_error[H_PRED] += diff * diff;
            diff = v_p - vleft_col[i];
            pred_error[H_PRED] += diff * diff;

            diff = u_p - predu;
            pred_error[TM_PRED] += diff * diff;
            diff = v_p - predv;
            pred_error[TM_PRED] += diff * diff;
        }

        usrc_ptr += uvsrc_stride;
        vsrc_ptr += uvsrc_stride;

        if (i == 3)
        {
            usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
            vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
        }
    }

    for (i = DC_PRED; i <= TM_PRED; i++)
    {
        if (best_error > pred_error[i])
        {
            best_error = pred_error[i];
            best_mode = (MB_PREDICTION_MODE)i;
        }
    }

    mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;

    return best_error;
}
int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
{
    BLOCK *b = &x->block[0];
    BLOCKD *d = &x->e_mbd.block[0];
    MACROBLOCKD *xd = &x->e_mbd;
    B_MODE_INFO best_bmodes[16];
    MB_MODE_INFO best_mbmode;
    PARTITION_INFO best_partition;
    MV best_ref_mv;
    MV mode_mv[MB_MODE_COUNT];
    MB_PREDICTION_MODE this_mode;
    int num00;
    int i;
    int mdcounts[4];
    int best_rd = INT_MAX; // 1 << 30;
    int best_intra_rd = INT_MAX;
    int mode_index;
    int ref_frame_cost[MAX_REF_FRAMES];
    int rate;
    int rate2;
    int distortion2;
    int bestsme;
    //int all_rds[MAX_MODES]; // Experimental debug code.
    int best_mode_index = 0;
    int sse = INT_MAX;

    MV mvp;
    int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int saddone = 0;
    int sr = 0;    //search range returned by mv_pred(), in step_param levels (0-7)

    MV nearest_mv[4];
    MV near_mv[4];
    MV frame_best_ref_mv[4];
    int MDCounts[4][4];
    unsigned char *y_buffer[4];
    unsigned char *u_buffer[4];
    unsigned char *v_buffer[4];

    int skip_mode[4] = {0, 0, 0, 0};

    vpx_memset(mode_mv, 0, sizeof(mode_mv));
    vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
    vpx_memset(near_mv, 0, sizeof(near_mv));
    vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
    // set up all the refframe dependent pointers.
    if (cpi->ref_frame_flags & VP8_LAST_FLAG)
    {
        YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[LAST_FRAME], &near_mv[LAST_FRAME],
                          &frame_best_ref_mv[LAST_FRAME], MDCounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
        u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
        v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[LAST_FRAME] = 1;

    if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
    {
        YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[GOLDEN_FRAME], &near_mv[GOLDEN_FRAME],
                          &frame_best_ref_mv[GOLDEN_FRAME], MDCounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
        u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
        v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[GOLDEN_FRAME] = 1;

    if (cpi->ref_frame_flags & VP8_ALT_FLAG && cpi->source_alt_ref_active)
    {
        YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[ALTREF_FRAME], &near_mv[ALTREF_FRAME],
                          &frame_best_ref_mv[ALTREF_FRAME], MDCounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
        u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
        v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[ALTREF_FRAME] = 1;
    cpi->mbs_tested_so_far++;          // Count of the number of MBs tested so far this frame

    *returnintra = best_intra_rd;
    x->skip = 0;

    ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);

    // Special case treatment when GF and ARF are not sensible options for reference
    if (cpi->ref_frame_flags == VP8_LAST_FLAG)
    {
        ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                     + vp8_cost_zero(255);
        ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(255)
                                       + vp8_cost_zero(128);
        ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(255)
                                       + vp8_cost_one(128);
    }
    else
    {
        ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                     + vp8_cost_zero(cpi->prob_last_coded);
        ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(cpi->prob_last_coded)
                                       + vp8_cost_zero(cpi->prob_gf_coded);
        ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(cpi->prob_last_coded)
                                       + vp8_cost_one(cpi->prob_gf_coded);
    }

    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
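    // The reference frame costs above mirror the bitstream's signalling
    // tree: one decision for intra vs inter (prob_intra_coded), then for
    // inter macroblocks last vs not-last (prob_last_coded) and golden vs
    // altref (prob_gf_coded). When only the last frame is available the
    // unused branches are costed with the fixed probabilities 255 and 128.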
    // if we encode a new mv this is important
    // find the best new motion vector
    for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
    {
        int frame_cost;
        int this_rd = INT_MAX;

        if (best_rd <= cpi->rd_threshes[mode_index])
            continue;

        x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];

        if (skip_mode[x->e_mbd.mode_info_context->mbmi.ref_frame])
            continue;

        // Check to see if the testing frequency for this mode is at its max
        // If so then prevent it from being tested and increase the threshold for its testing
        if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
        {
            //if ( (cpi->mbs_tested_so_far / cpi->mode_test_hit_counts[mode_index]) <= cpi->mode_check_freq[mode_index] )
            if (cpi->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index]))
            {
                // Increase the threshold for coding this mode to make it less likely to be chosen
                cpi->rd_thresh_mult[mode_index] += 4;

                if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                    cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

                cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];

                continue;
            }
        }

        // We have now reached the point where we are going to test the current mode so increment the counter for the number of times it has been tested
        cpi->mode_test_hit_counts[mode_index]++;
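        // In effect, a mode with check frequency N is re-examined roughly
        // once every N macroblocks: while mbs_tested_so_far is still within
        // check_freq * hit_count the mode is skipped, and each skip also
        // inflates its RD threshold a little further.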
        rate2 = 0;
        distortion2 = 0;

        this_mode = vp8_mode_order[mode_index];

        // Experimental debug code.
        //all_rds[mode_index] = -1;

        x->e_mbd.mode_info_context->mbmi.mode = this_mode;
        x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;

        // Work out the cost associated with selecting the reference frame
        frame_cost = ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
        rate2 += frame_cost;
        // everything but intra
        if (x->e_mbd.mode_info_context->mbmi.ref_frame)
        {
            x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
        }
        // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
        // unless ARNR filtering is enabled in which case we want
        // an unfiltered alternative
        if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
        {
            if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
                continue;
        }

        if (cpi->sf.improved_mv_pred && x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
        {
            if (!saddone)
            {
                vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
                saddone = 1;
            }

            vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
                        x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);

            /* adjust mvp to make sure it is within MV range */
            if (mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
                mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
            else if (mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
                mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;

            if (mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
                mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
            else if (mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
                mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
        }
        switch (this_mode)
        {
        case B_PRED:
            distortion2 = *returndistortion;    // Best so far passed in as breakout value to vp8_pick_intra4x4mby_modes
            vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);
            rate2 += rate;
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);

            if (distortion2 == INT_MAX)
            {
                this_rd = INT_MAX;
            }
            else
            {
                this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

                if (this_rd < best_intra_rd)
                {
                    best_intra_rd = this_rd;
                    *returnintra = best_intra_rd;
                }
            }

            break;
        case SPLITMV:

            // Split MV modes currently not supported when RD is not enabled.
            break;
        case DC_PRED:
        case V_PRED:
        case H_PRED:
        case TM_PRED:
            vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
            rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
            this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

            if (this_rd < best_intra_rd)
            {
                best_intra_rd = this_rd;
                *returnintra = best_intra_rd;
            }

            break;
        case NEWMV:
        {
            int thissme;
            int step_param;
            int further_steps;
            int n = 0;
            int sadpb = x->sadperbit16;

            int col_min;
            int col_max;
            int row_min;
            int row_max;

            int tmp_col_min = x->mv_col_min;
            int tmp_col_max = x->mv_col_max;
            int tmp_row_min = x->mv_row_min;
            int tmp_row_max = x->mv_row_max;

            int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;

            // Further step/diamond searches as necessary
            step_param = cpi->sf.first_step + speed_adjust;

            if (cpi->sf.improved_mv_pred)
            {
                sr += speed_adjust;

                //adjust search range according to sr from mv prediction
                if (sr > step_param)
                    step_param = sr;

                col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >> 3;
                col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >> 3;
                row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >> 3;
                row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >> 3;

                // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
                if (x->mv_col_min < col_min)
                    x->mv_col_min = col_min;
                if (x->mv_col_max > col_max)
                    x->mv_col_max = col_max;
                if (x->mv_row_min < row_min)
                    x->mv_row_min = row_min;
                if (x->mv_row_max > row_max)
                    x->mv_row_max = row_max;
            }
            else
            {
                mvp.row = best_ref_mv.row;
                mvp.col = best_ref_mv.col;
            }

            further_steps = (cpi->Speed >= 8) ? 0 : (cpi->sf.max_step_search_steps - 1 - step_param);

            if (cpi->sf.search_method == HEX)
            {
                bestsme = vp8_hex_search(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
                mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
            }
            else
            {
                bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb < 9
                mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;

                // Further step/diamond searches as necessary
                //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;

                n = num00;
                num00 = 0;

                while (n < further_steps)
                {
                    n++;

                    if (num00)
                        num00--;
                    else
                    {
                        thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv); //sadpb = 9

                        if (thissme < bestsme)
                        {
                            bestsme = thissme;
                            mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                            mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
                        }
                        else
                        {
                            d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
                            d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
                        }
                    }
                }
            }

            if (cpi->sf.improved_mv_pred)
            {
                x->mv_col_min = tmp_col_min;
                x->mv_col_max = tmp_col_max;
                x->mv_row_min = tmp_row_min;
                x->mv_row_max = tmp_row_max;
            }

            if (bestsme < INT_MAX)
                cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost);

            mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
            mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;

            // mv cost
            rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
        }
        case NEARESTMV:
        case NEARMV:

            if (mode_mv[this_mode].row == 0 && mode_mv[this_mode].col == 0)
                continue;
        case ZEROMV:

            // Trap vectors that reach beyond the UMV borders
            // Note that ALL New MV, Nearest MV, Near MV and Zero MV code drops through to this point
            // because of the lack of break statements in the previous two cases.
            if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
                ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
                continue;

            rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
            x->e_mbd.mode_info_context->mbmi.mode = this_mode;
            x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode];
            x->e_mbd.block[0].bmi.mode = this_mode;
            x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;

            distortion2 = get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], (unsigned int *)(&sse));

            this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);

            if (cpi->active_map_enabled && x->active_ptr[0] == 0)
            {
                x->skip = 1;
            }
            else if (sse < x->encode_breakout)
            {
                // Check u and v to make sure skip is ok
                int sse2 = 0;

                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));

                if (sse2 * 2 < x->encode_breakout)
                    x->skip = 1;
                else
                    x->skip = 0;
            }

            break;

        default:
            break;
        }
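        // The encode-breakout test above flags x->skip when the luma SSE
        // and twice the combined chroma SSE both fall below the configured
        // threshold: the loop below then adopts this mode and stops
        // searching further modes.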
        // Experimental debug code.
        //all_rds[mode_index] = this_rd;

        if (this_rd < best_rd || x->skip)
        {
            // Note index of best mode
            best_mode_index = mode_index;

            *returnrate = rate2;
            *returndistortion = distortion2;
            best_rd = this_rd;
            vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
            vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));

            if (this_mode == B_PRED || this_mode == SPLITMV)
                for (i = 0; i < 16; i++)
                {
                    vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
                }
            else
            {
                best_bmodes[0].mv = x->e_mbd.block[0].bmi.mv;
            }

            // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
            cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }
        // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
        else
        {
            cpi->rd_thresh_mult[mode_index] += 4;

            if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }

        if (x->skip)
            break;
    }
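    // Note on the adaptation above: each win lowers a mode's rd_thresh_mult
    // by 2 (floored at MIN_THRESHMULT) and each loss raises it by 4 (capped
    // at MAX_THRESHMULT); the active threshold is always
    // (rd_baseline_thresh >> 7) * rd_thresh_mult.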
    // Reduce the activation RD thresholds for the best choice mode
    if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
    {
        int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);

        cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
        cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
    }

    // Keep a record of best mode index for use in next loop
    cpi->last_best_mode_index = best_mode_index;
    if (best_mbmode.mode <= B_PRED)
    {
        x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
        vp8_pick_intra_mbuv_mode(x);
        best_mbmode.uv_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
    }

    {
        int this_rdbin = (*returndistortion >> 7);

        if (this_rdbin >= 1024)
        {
            this_rdbin = 1023;
        }

        cpi->error_bins[this_rdbin]++;
    }
    if (cpi->is_src_frame_alt_ref && (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
    {
        best_mbmode.mode = ZEROMV;
        best_mbmode.ref_frame = ALTREF_FRAME;
        best_mbmode.mv.as_int = 0;
        best_mbmode.uv_mode = 0;
        best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
        best_mbmode.partitioning = 0;
        best_mbmode.dc_diff = 0;

        vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
        vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));

        for (i = 0; i < 16; i++)
        {
            vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
        }

        x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;

        return best_rd;
    }
    // macroblock modes
    vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
    vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));

    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
        for (i = 0; i < 16; i++)
        {
            vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
        }
    else
    {
        vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv.as_mv);
    }

    x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;

    return best_rd;
}