/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
13 #include "vpx_ports/config.h"
15 #include "modecosts.h"
16 #include "encodeintra.h"
17 #include "vp8/common/entropymode.h"
18 #include "pickinter.h"
19 #include "vp8/common/findnearmv.h"
21 #include "vp8/common/reconinter.h"
22 #include "vp8/common/reconintra.h"
23 #include "vp8/common/reconintra4x4.h"
24 #include "vp8/common/g_common.h"
28 #include "vpx_mem/vpx_mem.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
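/* With runtime CPU detection enabled, IF_RTCD() passes the per-CPU function
 * table through to the callee. Without it the *_INVOKE macros resolve to
 * direct calls at compile time and ignore the table argument, so passing
 * NULL is safe: the pointer is never dereferenced.
 */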
extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
extern unsigned int cnt_pm;
extern const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES];
extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
extern unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride);
extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, MV *mv);
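/* Speed-feature stand-in for the sub-pixel motion search: no fractional
 * refinement is done, the full-pixel result is simply promoted to 1/8-pel
 * units. The body below is a minimal sketch of the upstream stub; the
 * unused parameters keep the signature interchangeable with the full
 * fractional search function.
 */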
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    (void) mb;
    (void) b;
    (void) d;
    (void) ref_mv;
    (void) error_per_bit;
    (void) vfp;
    (void) mvcost;

    /* Convert the full-pixel result to 1/8-pel units. */
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    return 0;
}
static int get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp, unsigned int *sse)
{
    BLOCK *b = &mb->block[0];
    BLOCKD *d = &mb->e_mbd.block[0];
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what = *(d->base_pre) + d->pre;
    int in_what_stride = d->pre_stride;
    int xoffset = d->bmi.mv.as_mv.col & 7;
    int yoffset = d->bmi.mv.as_mv.row & 7;

    in_what += (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (xoffset | yoffset)
    {
        return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse);
    }
    else
    {
        return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
    }
}
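/* Sum of squared differences over a 16x16 block with the DC component
 * removed: the squared sum of the per-pixel differences is subtracted
 * (sum * sum / 256), so a uniform brightness offset between source and
 * prediction is not penalised.
 */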
unsigned int vp8_get16x16pred_error_c
(
    const unsigned char *src_ptr,
    int src_stride,
    const unsigned char *ref_ptr,
    int ref_stride
)
{
    unsigned pred_error = 0;
    int i, j;
    int sum = 0;

    for (i = 0; i < 16; i++)
    {
        int diff;

        for (j = 0; j < 16; j++)
        {
            diff = src_ptr[j] - ref_ptr[j];
            sum += diff;
            pred_error += diff * diff;
        }

        src_ptr += src_stride;
        ref_ptr += ref_stride;
    }

    pred_error -= sum * sum / 256;
    return pred_error;
}
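/* Plain 4x4 sum of squared differences between source and reference,
 * with independent source/reference strides.
 */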
unsigned int vp8_get4x4sse_cs_c
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride
)
{
    int distortion = 0;
    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            int diff = src_ptr[c] - ref_ptr[c];
            distortion += diff * diff;
        }

        src_ptr += source_stride;
        ref_ptr += recon_stride;
    }

    return distortion;
}
static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
{
    unsigned char *sptr;
    unsigned char *dptr;
    sptr = (*(be->base_src) + be->src);
    dptr = b->predictor;

    return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16, 0x7fffffff);
}
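/* Choose the best intra prediction mode for one 4x4 subblock: each candidate
 * mode is scored as RDCOST(rate, distortion), where rate is the entropy cost
 * of signalling the mode (conditioned on the above/left subblock modes on
 * key frames) and distortion is the prediction SSE. The winning mode is
 * encoded into the block before returning, so subsequent subblocks predict
 * from reconstructed pixels.
 */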
static int pick_intra4x4block(
    const VP8_ENCODER_RTCD *rtcd,
    MACROBLOCK *x,
    BLOCK *be,
    BLOCKD *b,
    B_PREDICTION_MODE *best_mode,
    B_PREDICTION_MODE above,
    B_PREDICTION_MODE left,
    int *bestrate,
    int *bestdistortion)
{
    B_PREDICTION_MODE mode;
    int best_rd = INT_MAX;       // 1<<30
    int rate;
    int distortion;
    unsigned int *mode_costs;

    if (x->e_mbd.frame_type == KEY_FRAME)
    {
        mode_costs = x->bmode_costs[above][left];
    }
    else
    {
        mode_costs = x->inter_bmode_costs;
    }

    for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
    {
        int this_rd;

        rate = mode_costs[mode];
        vp8_predict_intra4x4(b, mode, b->predictor);
        distortion = get_prediction_error(be, b, &rtcd->variance);
        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            *bestrate = rate;
            *bestdistortion = distortion;
            best_rd = this_rd;
            *best_mode = mode;
        }
    }

    b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
    vp8_encode_intra4x4block(rtcd, x, be, b, b->bmi.mode);
    return best_rd;
}
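/* Evaluate B_PRED (4x4 intra) for the whole macroblock by picking each
 * subblock's mode in raster order. *best_dist carries the best distortion
 * seen so far for this macroblock and acts as a breakout threshold: once
 * the accumulated distortion exceeds it, the scan stops early and INT_MAX
 * is returned so the mode is discarded.
 */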
int vp8_pick_intra4x4mby_modes(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb, int *Rate, int *best_dist)
{
    MACROBLOCKD *const xd = &mb->e_mbd;
    int i;
    int cost = mb->mbmode_cost[xd->frame_type][B_PRED];
    int error;
    int distortion = 0;

    vp8_intra_prediction_down_copy(xd);

    for (i = 0; i < 16; i++)
    {
        MODE_INFO *const mic = xd->mode_info_context;
        const int mis = xd->mode_info_stride;
        const B_PREDICTION_MODE A = vp8_above_bmi(mic, i, mis)->mode;
        const B_PREDICTION_MODE L = vp8_left_bmi(mic, i)->mode;
        B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
        int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);

        pick_intra4x4block(rtcd, mb, mb->block + i, xd->block + i,
                           &best_mode, A, L, &r, &d);

        cost += r;
        distortion += d;

        mic->bmi[i].mode = xd->block[i].bmi.mode = best_mode;

        // Break out case where we have already exceeded the best so far value
        // that was passed in
        if (distortion > *best_dist)
            break;
    }

    for (i = 0; i < 16; i++)
        xd->block[i].bmi.mv.as_int = 0;

    *Rate = cost;

    // If the breakout above never fired, the full 16-subblock scan completed.
    if (distortion <= *best_dist)
    {
        *best_dist = distortion;
        error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
    }
    else
    {
        *best_dist = INT_MAX;
        error = INT_MAX;
    }

    return error;
}
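/* Pick the chroma intra mode by exhaustively measuring the SSE of the four
 * candidate predictors (DC, V, H, TM) against the source U and V planes in
 * a single pass over the 8x8 chroma block. No rate term is used here: the
 * cheapest predictor in pure distortion terms wins.
 */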
void vp8_pick_intra_mbuv_mode(MACROBLOCK *mb)
{
    MACROBLOCKD *x = &mb->e_mbd;
    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
    unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
    unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
    int uvsrc_stride = mb->block[16].src_stride;
    unsigned char uleft_col[8];
    unsigned char vleft_col[8];
    unsigned char utop_left = uabove_row[-1];
    unsigned char vtop_left = vabove_row[-1];
    int i, j;
    int expected_udc;
    int expected_vdc;
    int shift;
    int Uaverage = 0;
    int Vaverage = 0;
    int diff;
    int pred_error[4] = {0, 0, 0, 0}, best_error = INT_MAX;
    MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
    for (i = 0; i < 8; i++)
    {
        uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
        vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
    }
    if (!x->up_available && !x->left_available)
    {
        expected_udc = 128;
        expected_vdc = 128;
    }
    else
    {
        shift = 2;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }

            shift++;
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }

            shift++;
        }

        expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
        expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
    }
    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            // TM prediction, clamped to the valid pixel range
            int predu = uleft_col[i] + uabove_row[j] - utop_left;
            int predv = vleft_col[i] + vabove_row[j] - vtop_left;
            int u_p, v_p;

            u_p = usrc_ptr[j];
            v_p = vsrc_ptr[j];

            if (predu < 0)
                predu = 0;

            if (predu > 255)
                predu = 255;

            if (predv < 0)
                predv = 0;

            if (predv > 255)
                predv = 255;

            diff = u_p - expected_udc;
            pred_error[DC_PRED] += diff * diff;
            diff = v_p - expected_vdc;
            pred_error[DC_PRED] += diff * diff;

            diff = u_p - uabove_row[j];
            pred_error[V_PRED] += diff * diff;
            diff = v_p - vabove_row[j];
            pred_error[V_PRED] += diff * diff;

            diff = u_p - uleft_col[i];
            pred_error[H_PRED] += diff * diff;
            diff = v_p - vleft_col[i];
            pred_error[H_PRED] += diff * diff;

            diff = u_p - predu;
            pred_error[TM_PRED] += diff * diff;
            diff = v_p - predv;
            pred_error[TM_PRED] += diff * diff;
        }

        usrc_ptr += uvsrc_stride;
        vsrc_ptr += uvsrc_stride;

        // The source chroma rows live in two block groups; switch to the
        // lower group's source pointers halfway down.
        if (i == 3)
        {
            usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
            vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
        }
    }
    for (i = DC_PRED; i <= TM_PRED; i++)
    {
        if (best_error > pred_error[i])
        {
            best_error = pred_error[i];
            best_mode = (MB_PREDICTION_MODE)i;
        }
    }

    mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
}
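/* Real-time inter/intra mode decision for one macroblock. Unlike the full
 * RD path, this loop uses cheap distortion proxies (SAD-based motion search,
 * prediction SSE) together with adaptive per-mode thresholds (rd_threshes /
 * rd_thresh_mult) that throttle how often expensive modes are evaluated.
 */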
void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra)
{
    BLOCK *b = &x->block[0];
    BLOCKD *d = &x->e_mbd.block[0];
    MACROBLOCKD *xd = &x->e_mbd;
    B_MODE_INFO best_bmodes[16];
    MB_MODE_INFO best_mbmode;
    PARTITION_INFO best_partition;
    int i;
    MV best_ref_mv;
    MV mode_mv[MB_MODE_COUNT];
    MB_PREDICTION_MODE this_mode;
    int num00;
    int mdcounts[4];
    int best_rd = INT_MAX; // 1 << 30;
    int best_intra_rd = INT_MAX;
    int mode_index;
    int ref_frame_cost[MAX_REF_FRAMES];
    int rate;
    int rate2;
    int distortion2;
    int bestsme;
    int frame_cost;
    int sse = INT_MAX;
    //int all_rds[MAX_MODES]; // Experimental debug code.
    int best_mode_index = 0;

    MV mvp;
    int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int saddone = 0;
    int sr = 0;    //search range got from mv_pred(). It uses step_param levels. (0-7)

    MV nearest_mv[4];
    MV near_mv[4];
    MV frame_best_ref_mv[4];
    int MDCounts[4][4];
    unsigned char *y_buffer[4];
    unsigned char *u_buffer[4];
    unsigned char *v_buffer[4];

    int skip_mode[4] = {0, 0, 0, 0};
    vpx_memset(mode_mv, 0, sizeof(mode_mv));
    vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
    vpx_memset(near_mv, 0, sizeof(near_mv));
    vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
    // set up all the ref frame dependent pointers.
    if (cpi->ref_frame_flags & VP8_LAST_FLAG)
    {
        YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[LAST_FRAME], &near_mv[LAST_FRAME],
                          &frame_best_ref_mv[LAST_FRAME], MDCounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
        u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
        v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[LAST_FRAME] = 1;
    if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
    {
        YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[GOLDEN_FRAME], &near_mv[GOLDEN_FRAME],
                          &frame_best_ref_mv[GOLDEN_FRAME], MDCounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
        u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
        v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[GOLDEN_FRAME] = 1;
    if (cpi->ref_frame_flags & VP8_ALT_FLAG && cpi->source_alt_ref_active)
    {
        YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[ALTREF_FRAME], &near_mv[ALTREF_FRAME],
                          &frame_best_ref_mv[ALTREF_FRAME], MDCounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
        u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
        v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[ALTREF_FRAME] = 1;
    cpi->mbs_tested_so_far++;          // Count of the number of MBs tested so far this frame

    *returnintra = INT_MAX;
    x->skip = 0;

    ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
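    /* The reference frame is signalled as a small binary tree: one bit for
     * intra vs. inter (prob_intra_coded), then last vs. not-last
     * (prob_last_coded), then golden vs. altref (prob_gf_coded). The costs
     * below are the bit costs of each leaf under those probabilities.
     */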
    // Special case treatment when GF and ARF are not sensible options for reference
    if (cpi->ref_frame_flags == VP8_LAST_FLAG)
    {
        ref_frame_cost[LAST_FRAME]   = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_zero(255);
        ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(255)
                                       + vp8_cost_zero(128);
        ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(255)
                                       + vp8_cost_one(128);
    }
    else
    {
        ref_frame_cost[LAST_FRAME]   = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_zero(cpi->prob_last_coded);
        ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(cpi->prob_last_coded)
                                       + vp8_cost_zero(cpi->prob_gf_coded);
        ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
                                       + vp8_cost_one(cpi->prob_last_coded)
                                       + vp8_cost_one(cpi->prob_gf_coded);
    }
    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

    // if we encode a new mv this is important
    // find the best new motion vector
    for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
    {
        int this_rd = INT_MAX;

        if (best_rd <= cpi->rd_threshes[mode_index])
            continue;
        x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];

        if (skip_mode[x->e_mbd.mode_info_context->mbmi.ref_frame])
            continue;
        // Check to see if the testing frequency for this mode is at its max.
        // If so then prevent it from being tested and increase the threshold for its testing.
        if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
        {
            //if ( (cpi->mbs_tested_so_far / cpi->mode_test_hit_counts[mode_index]) <= cpi->mode_check_freq[mode_index] )
            if (cpi->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index]))
            {
                // Increase the threshold for coding this mode to make it less likely to be chosen
                cpi->rd_thresh_mult[mode_index] += 4;

                if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                    cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

                cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];

                continue;
            }
        }
        // We have now reached the point where we are going to test the current mode,
        // so increment the counter for the number of times it has been tested.
        cpi->mode_test_hit_counts[mode_index]++;

        rate2 = 0;
        distortion2 = 0;

        this_mode = vp8_mode_order[mode_index];

        // Experimental debug code.
        //all_rds[mode_index] = -1;

        x->e_mbd.mode_info_context->mbmi.mode = this_mode;
        x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
        // Work out the cost associated with selecting the reference frame
        frame_cost = ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
        rate2 += frame_cost;
        // everything but intra
        if (x->e_mbd.mode_info_context->mbmi.ref_frame)
        {
            x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
        }
        // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
        // unless ARNR filtering is enabled in which case we want
        // an unfiltered alternative
        if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
        {
            if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
                continue;
        }
        if (cpi->sf.improved_mv_pred && x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
        {
            if (!saddone)
            {
                vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
                saddone = 1;
            }

            vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
                        x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);
            /* adjust mvp to make sure it is within MV range */
            if (mvp.row > best_ref_mv.row + MAX_FULL_PEL_VAL)
                mvp.row = best_ref_mv.row + MAX_FULL_PEL_VAL;
            else if (mvp.row < best_ref_mv.row - MAX_FULL_PEL_VAL)
                mvp.row = best_ref_mv.row - MAX_FULL_PEL_VAL;

            if (mvp.col > best_ref_mv.col + MAX_FULL_PEL_VAL)
                mvp.col = best_ref_mv.col + MAX_FULL_PEL_VAL;
            else if (mvp.col < best_ref_mv.col - MAX_FULL_PEL_VAL)
                mvp.col = best_ref_mv.col - MAX_FULL_PEL_VAL;
        }
        switch (this_mode)
        {
        case B_PRED:
            // Best so far is passed in as a breakout value to vp8_pick_intra4x4mby_modes
            distortion2 = *returndistortion;
            vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);

            if (distortion2 == INT_MAX)
            {
                this_rd = INT_MAX;
            }
            else
            {
                rate2 += rate;
                distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
                this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

                if (this_rd < best_intra_rd)
                {
                    best_intra_rd = this_rd;
                    *returnintra = distortion2;
                }
            }

            break;
        case SPLITMV:
            // Split MV modes currently not supported when RD is not enabled.
            break;
        case DC_PRED:
        case V_PRED:
        case H_PRED:
        case TM_PRED:
            RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
                (&x->e_mbd);
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
            rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
            this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

            if (this_rd < best_intra_rd)
            {
                best_intra_rd = this_rd;
                *returnintra = distortion2;
            }

            break;
        case NEWMV:
        {
            int thissme;
            int step_param;
            int further_steps;
            int n = 0;
            int sadpb = x->sadperbit16;

            int col_min;
            int col_max;
            int row_min;
            int row_max;

            int tmp_col_min = x->mv_col_min;
            int tmp_col_max = x->mv_col_max;
            int tmp_row_min = x->mv_row_min;
            int tmp_row_max = x->mv_row_max;

            int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1;
            // Further step/diamond searches as necessary
            step_param = cpi->sf.first_step + speed_adjust;
            if (cpi->sf.improved_mv_pred)
            {
                sr += speed_adjust;

                // adjust search range according to sr from mv prediction
                if (sr > step_param)
                    step_param = sr;

                col_min = (best_ref_mv.col - MAX_FULL_PEL_VAL) >> 3;
                col_max = (best_ref_mv.col + MAX_FULL_PEL_VAL) >> 3;
                row_min = (best_ref_mv.row - MAX_FULL_PEL_VAL) >> 3;
                row_max = (best_ref_mv.row + MAX_FULL_PEL_VAL) >> 3;
                // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
                if (x->mv_col_min < col_min)
                    x->mv_col_min = col_min;

                if (x->mv_col_max > col_max)
                    x->mv_col_max = col_max;

                if (x->mv_row_min < row_min)
                    x->mv_row_min = row_min;

                if (x->mv_row_max > row_max)
                    x->mv_row_max = row_max;
            }
            else
            {
                mvp.row = best_ref_mv.row;
                mvp.col = best_ref_mv.col;
            }
            further_steps = (cpi->Speed >= 8) ? 0 : (cpi->sf.max_step_search_steps - 1 - step_param);
            if (cpi->sf.search_method == HEX)
            {
                bestsme = vp8_hex_search(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv);
                mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
            }
            else
            {
                bestsme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb < 9
                mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
                // Further step/diamond searches as necessary
                n = num00;
                num00 = 0;

                //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;

                while (n < further_steps)
                {
                    n++;

                    if (num00)
                        num00--;
                    else
                    {
                        thissme = cpi->diamond_search_sad(x, b, d, &mvp, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); //sadpb = 9

                        if (thissme < bestsme)
                        {
                            bestsme = thissme;
                            mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
                            mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
                        }
                        else
                        {
                            d->bmi.mv.as_mv.row = mode_mv[NEWMV].row;
                            d->bmi.mv.as_mv.col = mode_mv[NEWMV].col;
                        }
                    }
                }
            }
            if (cpi->sf.improved_mv_pred)
            {
                x->mv_col_min = tmp_col_min;
                x->mv_col_max = tmp_col_max;
                x->mv_row_min = tmp_row_min;
                x->mv_row_max = tmp_row_max;
            }
            if (bestsme < INT_MAX)
                cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv.as_mv, &best_ref_mv, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost);
            mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
            mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;

            // mv cost
            rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
        }
        case NEARESTMV:
        case NEARMV:
            // Skip NEARESTMV/NEARMV if the vector duplicates ZEROMV
            if (mode_mv[this_mode].row == 0 && mode_mv[this_mode].col == 0)
                continue;

        case ZEROMV:
            // Trap vectors that reach beyond the UMV borders
            // Note that ALL New MV, Nearest MV, Near MV and Zero MV code drops through to this point
            // because of the lack of break statements in the previous two cases.
            if (((mode_mv[this_mode].row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].row >> 3) > x->mv_row_max) ||
                ((mode_mv[this_mode].col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].col >> 3) > x->mv_col_max))
                continue;
            rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
            x->e_mbd.mode_info_context->mbmi.mode = this_mode;
            x->e_mbd.mode_info_context->mbmi.mv.as_mv = mode_mv[this_mode];
            x->e_mbd.block[0].bmi.mode = this_mode;
            x->e_mbd.block[0].bmi.mv.as_int = x->e_mbd.mode_info_context->mbmi.mv.as_int;

            distortion2 = get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], (unsigned int *)(&sse));
            this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
            if (cpi->active_map_enabled && x->active_ptr[0] == 0)
            {
                x->skip = 1;
            }
            else if (sse < x->encode_breakout)
            {
                // Check u and v to make sure skip is ok
                int sse2 = 0;

                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));

                if (sse2 * 2 < x->encode_breakout)
                    x->skip = 1;
                else
                    x->skip = 0;
            }

            break;

        default:
            break;
        }
        // Experimental debug code.
        //all_rds[mode_index] = this_rd;

        if (this_rd < best_rd || x->skip)
        {
            // Note index of best mode
            best_mode_index = mode_index;
            *returnrate = rate2;
            *returndistortion = distortion2;
            best_rd = this_rd;
            vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
            vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
            if (this_mode == B_PRED || this_mode == SPLITMV)
                for (i = 0; i < 16; i++)
                {
                    vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
                }
            else
            {
                best_bmodes[0].mv = x->e_mbd.block[0].bmi.mv;
            }
            // Testing this mode gave rise to an improvement in best error score.
            // Lower the threshold a bit for next time.
            cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }
        // If the mode did not help improve the best error case then raise the
        // threshold for testing that mode next time around.
        else
        {
            cpi->rd_thresh_mult[mode_index] += 4;

            if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }

        if (x->skip)
            break;
    }
    // Reduce the activation RD thresholds for the best choice mode
    if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
    {
        int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);

        cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
        cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
    }
    // Keep a record of the best mode index for use in the next loop
    cpi->last_best_mode_index = best_mode_index;
    if (best_mbmode.mode <= B_PRED)
    {
        x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
        vp8_pick_intra_mbuv_mode(x);
        best_mbmode.uv_mode = x->e_mbd.mode_info_context->mbmi.uv_mode;
    }
    {
        int this_rdbin = (*returndistortion >> 7);

        if (this_rdbin >= 1024)
        {
            this_rdbin = 1023;
        }

        cpi->error_bins[this_rdbin]++;
    }
    if (cpi->is_src_frame_alt_ref && (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
    {
        best_mbmode.mode = ZEROMV;
        best_mbmode.ref_frame = ALTREF_FRAME;
        best_mbmode.mv.as_int = 0;
        best_mbmode.uv_mode = 0;
        best_mbmode.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
        best_mbmode.partitioning = 0;
        best_mbmode.dc_diff = 0;
        vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
        vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
        for (i = 0; i < 16; i++)
        {
            vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
        }
        x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;

        return;
    }
    vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
    vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED || x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
        for (i = 0; i < 16; i++)
        {
            vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
        }
    else
    {
        vp8_set_mbmode_and_mvs(x, x->e_mbd.mode_info_context->mbmi.mode, &best_bmodes[0].mv.as_mv);
    }
    x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
}