/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <limits.h>
#include "vpx_ports/config.h"
#include "onyx_int.h"
#include "modecosts.h"
#include "encodeintra.h"
#include "vp8/common/entropymode.h"
#include "pickinter.h"
#include "vp8/common/findnearmv.h"
#include "encodemb.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/g_common.h"
#include "variance.h"
#include "mcomp.h"
#include "rdopt.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);

extern unsigned int cnt_pm;

extern const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES];
extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];

extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
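
/* When sub-pixel refinement is skipped (e.g. at very high speed settings),
 * this stand-in for the fractional MV step simply scales the best full-pel
 * vector up to 1/8-pel units and leaves the remaining outputs untouched. */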
int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
                                int_mv *bestmv, int_mv *ref_mv,
                                int error_per_bit,
                                const vp8_variance_fn_ptr_t *vfp,
                                int *mvcost[2], int *distortion,
                                unsigned int *sse)
{
    (void) b;
    (void) d;
    (void) ref_mv;
    (void) error_per_bit;
    (void) vfp;
    (void) mvcost;
    (void) distortion;
    (void) sse;
    bestmv->as_mv.row <<= 3;
    bestmv->as_mv.col <<= 3;
    return 0;
}
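
/* Returns the 16x16 prediction error for an inter mode: if the motion vector
 * has a fractional (1/8-pel) component, the sub-pixel variance function is
 * used, otherwise the cheaper whole-pel variance function is sufficient. */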
static int get_inter_mbpred_error(MACROBLOCK *mb,
                                  const vp8_variance_fn_ptr_t *vfp,
                                  unsigned int *sse,
                                  int_mv this_mv)
{
    BLOCK *b = &mb->block[0];
    BLOCKD *d = &mb->e_mbd.block[0];
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what = *(d->base_pre) + d->pre;
    int in_what_stride = d->pre_stride;
    int xoffset = this_mv.as_mv.col & 7;
    int yoffset = this_mv.as_mv.row & 7;

    in_what += (this_mv.as_mv.row >> 3) * d->pre_stride + (this_mv.as_mv.col >> 3);

    if (xoffset | yoffset)
    {
        return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse);
    }
    else
    {
        return vfp->vf(what, what_stride, in_what, in_what_stride, sse);
    }
}
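
/* C reference implementation of the 4x4 sum of squared error used by the
 * intra 4x4 mode picker; platform-specific versions may be selected through
 * the RTCD variance table. */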
unsigned int vp8_get4x4sse_cs_c
(
    const unsigned char *src_ptr,
    int  source_stride,
    const unsigned char *ref_ptr,
    int  recon_stride
)
{
    int distortion = 0;
    int r, c;

    for (r = 0; r < 4; r++)
    {
        for (c = 0; c < 4; c++)
        {
            int diff = src_ptr[c] - ref_ptr[c];
            distortion += diff * diff;
        }

        src_ptr += source_stride;
        ref_ptr += recon_stride;
    }

    return distortion;
}
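
/* Measures how well a 4x4 intra predictor matches the source block by taking
 * the SSE between the source pixels and the predictor buffer. */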
static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
{
    unsigned char *sptr;
    unsigned char *dptr;
    sptr = (*(be->base_src) + be->src);
    dptr = b->predictor;

    return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
}
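
/* Tries each 4x4 intra prediction mode for one block, scoring each candidate
 * with RDCOST() using the mode's entropy cost as rate and the prediction SSE
 * as distortion, then encodes the block with the winning mode. */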
static int pick_intra4x4block(
    const VP8_ENCODER_RTCD *rtcd,
    MACROBLOCK *x,
    int ib,
    B_PREDICTION_MODE *best_mode,
    unsigned int *mode_costs,

    int *bestrate,
    int *bestdistortion)
{
    BLOCKD *b = &x->e_mbd.block[ib];
    BLOCK *be = &x->block[ib];
    B_PREDICTION_MODE mode;
    int best_rd = INT_MAX;       // 1<<30
    int rate;
    int distortion;

    for (mode = B_DC_PRED; mode <= B_HE_PRED /*B_HU_PRED*/; mode++)
    {
        int this_rd;

        rate = mode_costs[mode];
        RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
                     (b, mode, b->predictor);
        distortion = get_prediction_error(be, b, &rtcd->variance);
        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (this_rd < best_rd)
        {
            *bestrate = rate;
            *bestdistortion = distortion;
            best_rd = this_rd;
            *best_mode = mode;
        }
    }

    b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
    vp8_encode_intra4x4block(rtcd, x, ib);
    return best_rd;
}
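
/* Runs the 4x4 mode picker over all 16 luma subblocks of a macroblock,
 * accumulating rate and distortion.  The running distortion is compared
 * against the best-so-far value passed in through *best_dist so the loop
 * can break out early once B_PRED can no longer win. */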
static int pick_intra4x4mby_modes
(
    const VP8_ENCODER_RTCD *rtcd,
    MACROBLOCK *mb,
    int *Rate,
    int *best_dist
)
{
    MACROBLOCKD *const xd = &mb->e_mbd;
    int i;
    int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
    int error;
    int distortion = 0;
    unsigned int *bmode_costs;

    vp8_intra_prediction_down_copy(xd);

    bmode_costs = mb->inter_bmode_costs;

    for (i = 0; i < 16; i++)
    {
        MODE_INFO *const mic = xd->mode_info_context;
        const int mis = xd->mode_info_stride;

        B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
        int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d);

        if (mb->e_mbd.frame_type == KEY_FRAME)
        {
            const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
            const B_PREDICTION_MODE L = left_block_mode(mic, i);

            bmode_costs = mb->bmode_costs[A][L];
        }

        pick_intra4x4block(rtcd, mb, i, &best_mode, bmode_costs, &r, &d);

        cost += r;
        distortion += d;
        mic->bmi[i].as_mode = best_mode;

        // Break out case where we have already exceeded best so far value
        // that was passed in
        if (distortion > *best_dist)
            break;
    }

    *Rate = cost;

    if (i == 16)
    {
        *best_dist = distortion;
        error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
    }
    else
    {
        *best_dist = INT_MAX;
        error = INT_MAX;
    }

    return error;
}
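
/* Chooses the chroma (U/V) intra mode by directly computing the prediction
 * error of DC_PRED, V_PRED, H_PRED and TM_PRED against the source 8x8 chroma
 * blocks.  The DC predictor is the rounded average of the available above
 * row and/or left column (128 when neither neighbour exists), and the TM
 * predictor for each pixel is left + above - top_left, clamped to [0, 255]. */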
static void pick_intra_mbuv_mode(MACROBLOCK *mb)
{
    MACROBLOCKD *x = &mb->e_mbd;
    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
    unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src);
    unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src);
    int uvsrc_stride = mb->block[16].src_stride;
    unsigned char uleft_col[8];
    unsigned char vleft_col[8];
    unsigned char utop_left = uabove_row[-1];
    unsigned char vtop_left = vabove_row[-1];
    int i, j;
    int expected_udc;
    int expected_vdc;
    int shift;
    int Uaverage = 0;
    int Vaverage = 0;
    int diff;
    int pred_error[4] = {0, 0, 0, 0}, best_error = INT_MAX;
    MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);

    for (i = 0; i < 8; i++)
    {
        uleft_col[i] = x->dst.u_buffer [i * x->dst.uv_stride - 1];
        vleft_col[i] = x->dst.v_buffer [i * x->dst.uv_stride - 1];
    }

    if (!x->up_available && !x->left_available)
    {
        expected_udc = 128;
        expected_vdc = 128;
    }
    else
    {
        shift = 2;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }

            shift ++;
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }

            shift ++;
        }

        expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
        expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
    }

    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            int predu = uleft_col[i] + uabove_row[j] - utop_left;
            int predv = vleft_col[i] + vabove_row[j] - vtop_left;
            int u_p, v_p;

            u_p = usrc_ptr[j];
            v_p = vsrc_ptr[j];

            if (predu < 0)
                predu = 0;

            if (predu > 255)
                predu = 255;

            if (predv < 0)
                predv = 0;

            if (predv > 255)
                predv = 255;

            diff = u_p - expected_udc;
            pred_error[DC_PRED] += diff * diff;
            diff = v_p - expected_vdc;
            pred_error[DC_PRED] += diff * diff;

            diff = u_p - uabove_row[j];
            pred_error[V_PRED] += diff * diff;
            diff = v_p - vabove_row[j];
            pred_error[V_PRED] += diff * diff;

            diff = u_p - uleft_col[i];
            pred_error[H_PRED] += diff * diff;
            diff = v_p - vleft_col[i];
            pred_error[H_PRED] += diff * diff;

            diff = u_p - predu;
            pred_error[TM_PRED] += diff * diff;
            diff = v_p - predv;
            pred_error[TM_PRED] += diff * diff;
        }

        usrc_ptr += uvsrc_stride;
        vsrc_ptr += uvsrc_stride;

        if (i == 3) // after the first four rows, switch to the lower 4x4 source blocks (18 for U, 22 for V)
        {
            usrc_ptr = (mb->block[18].src + *mb->block[18].base_src);
            vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src);
        }
    }

    for (i = DC_PRED; i <= TM_PRED; i++)
    {
        if (best_error > pred_error[i])
        {
            best_error = pred_error[i];
            best_mode = (MB_PREDICTION_MODE)i;
        }
    }

    mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode;
}
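
/* Updates the MV component counts used for entropy coding.  Split MV modes
 * are not generated on this path, so only NEWMV needs counting; the row and
 * column differences from the best reference MV are accumulated into
 * cpi->MVcount. */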
static void update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
{
    /* Split MV modes currently not supported when RD is not enabled,
     * therefore, only need to modify MVcount in NEWMV mode. */
    if (xd->mode_info_context->mbmi.mode == NEWMV)
    {
        cpi->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row -
                                      best_ref_mv->as_mv.row) >> 1)]++;
        cpi->MVcount[1][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.col -
                                      best_ref_mv->as_mv.col) >> 1)]++;
    }
}
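
/* Real-time mode decision for one inter-frame macroblock.  For each entry in
 * vp8_mode_order the routine builds a prediction (intra, zero/nearest/near
 * MV, or a new MV found by hex/diamond search plus optional sub-pixel
 * refinement), forms an RD cost from the mode/MV rate and the 16x16
 * prediction error, and keeps the cheapest.  Per-mode thresholds are adapted
 * as the frame is encoded so rarely useful modes are tested less often, and
 * an encode_breakout check on the Y and UV SSE can mark the macroblock for
 * skipping. */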
void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
                         int recon_uvoffset, int *returnrate,
                         int *returndistortion, int *returnintra)
{
    BLOCK *b = &x->block[0];
    BLOCKD *d = &x->e_mbd.block[0];
    MACROBLOCKD *xd = &x->e_mbd;
    MB_MODE_INFO best_mbmode;

    int_mv best_ref_mv;
    int_mv mode_mv[MB_MODE_COUNT];
    MB_PREDICTION_MODE this_mode;
    int num00;
    int mdcounts[4];
    int best_rd = INT_MAX; // 1 << 30;
    int best_intra_rd = INT_MAX;
    int mode_index;
    int rate;
    int rate2;
    int distortion2;
    int bestsme;
    //int all_rds[MAX_MODES]; // Experimental debug code.
    int best_mode_index = 0;
    unsigned int sse = INT_MAX, best_sse = INT_MAX;

    int_mv mvp;
    int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    int saddone = 0;
    int sr = 0;    // search range obtained from mv_pred(); it uses step_param levels (0-7)

    int_mv nearest_mv[4];
    int_mv near_mv[4];
    int_mv frame_best_ref_mv[4];
    int MDCounts[4][4];
    unsigned char *y_buffer[4];
    unsigned char *u_buffer[4];
    unsigned char *v_buffer[4];

    int skip_mode[4] = {0, 0, 0, 0};

    int have_subp_search = cpi->sf.half_pixel_search;  /* In real-time mode, when Speed >= 15, no sub-pixel search. */

    vpx_memset(mode_mv, 0, sizeof(mode_mv));
    vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
    vpx_memset(near_mv, 0, sizeof(near_mv));
    vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
    // set up all the refframe dependent pointers.
    if (cpi->ref_frame_flags & VP8_LAST_FLAG)
    {
        YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[LAST_FRAME], &near_mv[LAST_FRAME],
                          &frame_best_ref_mv[LAST_FRAME], MDCounts[LAST_FRAME], LAST_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[LAST_FRAME] = lst_yv12->y_buffer + recon_yoffset;
        u_buffer[LAST_FRAME] = lst_yv12->u_buffer + recon_uvoffset;
        v_buffer[LAST_FRAME] = lst_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[LAST_FRAME] = 1;

    if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
    {
        YV12_BUFFER_CONFIG *gld_yv12 = &cpi->common.yv12_fb[cpi->common.gld_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[GOLDEN_FRAME], &near_mv[GOLDEN_FRAME],
                          &frame_best_ref_mv[GOLDEN_FRAME], MDCounts[GOLDEN_FRAME], GOLDEN_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[GOLDEN_FRAME] = gld_yv12->y_buffer + recon_yoffset;
        u_buffer[GOLDEN_FRAME] = gld_yv12->u_buffer + recon_uvoffset;
        v_buffer[GOLDEN_FRAME] = gld_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[GOLDEN_FRAME] = 1;

    if (cpi->ref_frame_flags & VP8_ALT_FLAG && cpi->source_alt_ref_active)
    {
        YV12_BUFFER_CONFIG *alt_yv12 = &cpi->common.yv12_fb[cpi->common.alt_fb_idx];

        vp8_find_near_mvs(&x->e_mbd, x->e_mbd.mode_info_context, &nearest_mv[ALTREF_FRAME], &near_mv[ALTREF_FRAME],
                          &frame_best_ref_mv[ALTREF_FRAME], MDCounts[ALTREF_FRAME], ALTREF_FRAME, cpi->common.ref_frame_sign_bias);

        y_buffer[ALTREF_FRAME] = alt_yv12->y_buffer + recon_yoffset;
        u_buffer[ALTREF_FRAME] = alt_yv12->u_buffer + recon_uvoffset;
        v_buffer[ALTREF_FRAME] = alt_yv12->v_buffer + recon_uvoffset;
    }
    else
        skip_mode[ALTREF_FRAME] = 1;
    cpi->mbs_tested_so_far++;          // Count of the number of MBs tested so far this frame

    *returnintra = INT_MAX;
    x->skip = 0;

    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

    // if we encode a new mv this is important
    // find the best new motion vector
    for (mode_index = 0; mode_index < MAX_MODES; mode_index++)
    {
        int this_rd = INT_MAX;

        if (best_rd <= cpi->rd_threshes[mode_index])
            continue;

        x->e_mbd.mode_info_context->mbmi.ref_frame = vp8_ref_frame_order[mode_index];

        if (skip_mode[x->e_mbd.mode_info_context->mbmi.ref_frame])
            continue;

        // Check to see if the testing frequency for this mode is at its max
        // If so then prevent it from being tested and increase the threshold for its testing
        if (cpi->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1))
        {
            //if ( (cpi->mbs_tested_so_far / cpi->mode_test_hit_counts[mode_index]) <= cpi->mode_check_freq[mode_index] )
            if (cpi->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] * cpi->mode_test_hit_counts[mode_index]))
            {
                // Increase the threshold for coding this mode to make it less likely to be chosen
                cpi->rd_thresh_mult[mode_index] += 4;

                if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                    cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

                cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];

                continue;
            }
        }

        // We have now reached the point where we are going to test the current mode so increment the counter for the number of times it has been tested
        cpi->mode_test_hit_counts[mode_index] ++;
        rate2 = 0;
        distortion2 = 0;

        this_mode = vp8_mode_order[mode_index];

        // Experimental debug code.
        //all_rds[mode_index] = -1;

        x->e_mbd.mode_info_context->mbmi.mode = this_mode;
        x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;

        // Work out the cost associated with selecting the reference frame
        rate2 +=
            x->e_mbd.ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];

        // everything but intra
        if (x->e_mbd.mode_info_context->mbmi.ref_frame)
        {
            x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARESTMV] = nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            mode_mv[NEARMV] = near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
            memcpy(mdcounts, MDCounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
        }

        // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
        // unless ARNR filtering is enabled in which case we want
        // an unfiltered alternative
        if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
        {
            if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
                continue;
        }
        switch (this_mode)
        {
        case B_PRED:
            // Pass best so far to pick_intra4x4mby_modes to use as breakout
            distortion2 = best_sse;
            pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate, &distortion2);

            if (distortion2 == INT_MAX)
            {
                this_rd = INT_MAX;
            }
            else
            {
                rate2 += rate;
                distortion2 = VARIANCE_INVOKE
                                (&cpi->rtcd.variance, var16x16)(
                                    *(b->base_src), b->src_stride,
                                    x->e_mbd.predictor, 16, &sse);
                this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

                if (this_rd < best_intra_rd)
                {
                    best_intra_rd = this_rd;
                    *returnintra = distortion2;
                }
            }

            break;

        case SPLITMV:
            // Split MV modes currently not supported when RD is not enabled.
            break;

        case DC_PRED:
        case V_PRED:
        case H_PRED:
        case TM_PRED:
            RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
                (&x->e_mbd);
            distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
                                          (*(b->base_src), b->src_stride,
                                          x->e_mbd.predictor, 16, &sse);
            rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
            this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

            if (this_rd < best_intra_rd)
            {
                best_intra_rd = this_rd;
                *returnintra = distortion2;
            }
            break;

        case NEWMV:
        {
            int thissme;
            int step_param;
            int further_steps;
            int n = 0;
            int sadpb = x->sadperbit16;
            int_mv mvp_full;

            int col_min = (best_ref_mv.as_mv.col>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.col & 7)?1:0);
            int row_min = (best_ref_mv.as_mv.row>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv.as_mv.row & 7)?1:0);
            int col_max = (best_ref_mv.as_mv.col>>3) + MAX_FULL_PEL_VAL;
            int row_max = (best_ref_mv.as_mv.row>>3) + MAX_FULL_PEL_VAL;

            int tmp_col_min = x->mv_col_min;
            int tmp_col_max = x->mv_col_max;
            int tmp_row_min = x->mv_row_min;
            int tmp_row_max = x->mv_row_max;

            int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 3 : 2) : 1;

            // Further step/diamond searches as necessary
            step_param = cpi->sf.first_step + speed_adjust;

            if(cpi->sf.improved_mv_pred)
            {
                if(!saddone)
                {
                    vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]);
                    saddone = 1;
                }

                vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp,
                            x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]);

                sr += speed_adjust;
                //adjust search range according to sr from mv prediction
                if(sr > step_param)
                    step_param = sr;

                mvp_full.as_mv.col = mvp.as_mv.col>>3;
                mvp_full.as_mv.row = mvp.as_mv.row>>3;
            }
            else
            {
                mvp.as_int = best_ref_mv.as_int;
                mvp_full.as_mv.col = best_ref_mv.as_mv.col>>3;
                mvp_full.as_mv.row = best_ref_mv.as_mv.row>>3;
            }

            // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
            if (x->mv_col_min < col_min)
                x->mv_col_min = col_min;
            if (x->mv_col_max > col_max)
                x->mv_col_max = col_max;
            if (x->mv_row_min < row_min)
                x->mv_row_min = row_min;
            if (x->mv_row_max > row_max)
                x->mv_row_max = row_max;

            further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param);

            if (cpi->sf.search_method == HEX)
            {
                bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param,
                                         sadpb, &cpi->fn_ptr[BLOCK_16X16],
                                         x->mvsadcost, x->mvcost, &best_ref_mv);
                mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
            }
            else
            {
                bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
                                                  step_param, sadpb, &num00,
                                                  &cpi->fn_ptr[BLOCK_16X16],
                                                  x->mvcost, &best_ref_mv);
                mode_mv[NEWMV].as_int = d->bmi.mv.as_int;

                // Further step/diamond searches as necessary
                //further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;

                n = num00;
                num00 = 0;

                while (n < further_steps)
                {
                    n++;

                    if (num00)
                        num00--;
                    else
                    {
                        thissme =
                        cpi->diamond_search_sad(x, b, d, &mvp_full,
                                                &d->bmi.mv,
                                                step_param + n,
                                                sadpb, &num00,
                                                &cpi->fn_ptr[BLOCK_16X16],
                                                x->mvcost, &best_ref_mv);
                        if (thissme < bestsme)
                        {
                            bestsme = thissme;
                            mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
                        }
                        else
                        {
                            d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
                        }
                    }
                }
            }

            x->mv_col_min = tmp_col_min;
            x->mv_col_max = tmp_col_max;
            x->mv_row_min = tmp_row_min;
            x->mv_row_max = tmp_row_max;

            if (bestsme < INT_MAX)
                cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
                                             x->errorperbit,
                                             &cpi->fn_ptr[BLOCK_16X16],
                                             cpi->mb.mvcost,
                                             &distortion2, &sse);

            mode_mv[NEWMV].as_int = d->bmi.mv.as_int;

            rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128);
        }
        case NEARESTMV:
        case NEARMV:

            if (mode_mv[this_mode].as_int == 0)
                continue;

        case ZEROMV:

            // Trap vectors that reach beyond the UMV borders
            // Note that ALL New MV, Nearest MV Near MV and Zero MV code drops through to this point
            // because of the lack of break statements in the previous two cases.
            if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
                ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
                continue;

            rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
            x->e_mbd.mode_info_context->mbmi.mv.as_int =
                                                    mode_mv[this_mode].as_int;

            /* Exit early and don't compute the distortion if this macroblock is marked inactive. */
            if (cpi->active_map_enabled && x->active_ptr[0] == 0)
            {
                sse = 0;
                distortion2 = 0;
                x->skip = 1;
                break;
            }

            if((this_mode != NEWMV) ||
                !(have_subp_search) || cpi->common.full_pixel==1)
                distortion2 = get_inter_mbpred_error(x,
                                                     &cpi->fn_ptr[BLOCK_16X16],
                                                     &sse, mode_mv[this_mode]);

            this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);

            if (sse < x->encode_breakout)
            {
                // Check u and v to make sure skip is ok
                int sse2 = 0;

                sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));

                if (sse2 * 2 < x->encode_breakout)
                    x->skip = 1;
                else
                    x->skip = 0;
            }

            break;

        default:
            break;
        }
        // Experimental debug code.
        //all_rds[mode_index] = this_rd;

        if (this_rd < best_rd || x->skip)
        {
            // Note index of best mode
            best_mode_index = mode_index;

            *returnrate = rate2;
            *returndistortion = distortion2;
            best_sse = sse;
            best_rd = this_rd;
            vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));

            // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
            cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }

        // If the mode did not help improve the best error case then raise the threshold for testing that mode next time around.
        else
        {
            cpi->rd_thresh_mult[mode_index] += 4;

            if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
                cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;

            cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * cpi->rd_thresh_mult[mode_index];
        }

        if (x->skip)
            break;
    }
    // Reduce the activation RD thresholds for the best choice mode
    if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2)))
    {
        int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 3);

        cpi->rd_thresh_mult[best_mode_index] = (cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ? cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
        cpi->rd_threshes[best_mode_index] = (cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
    }

    {
        int this_rdbin = (*returndistortion >> 7);

        if (this_rdbin >= 1024)
        {
            this_rdbin = 1023;
        }

        cpi->error_bins[this_rdbin] ++;
    }

    if (cpi->is_src_frame_alt_ref &&
        (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME))
    {
        x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
        x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME;
        x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
        x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff =
                                        (cpi->common.mb_no_coeff_skip) ? 1 : 0;
        x->e_mbd.mode_info_context->mbmi.partitioning = 0;

        return;
    }

    /* set to the best mb mode */
    vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));

    if (best_mbmode.mode <= B_PRED)
    {
        /* set mode_info_context->mbmi.uv_mode */
        pick_intra_mbuv_mode(x);
    }

    update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
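
/* Intra-only (key frame) counterpart of the mode picker: selects the chroma
 * mode, compares the four 16x16 luma modes by RD cost, then checks whether
 * B_PRED (independent 4x4 modes) beats the best 16x16 choice. */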
void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
{
    int error4x4, error16x16 = INT_MAX;
    int rate, best_rate = 0, distortion, best_sse;
    MB_PREDICTION_MODE mode, best_mode = DC_PRED;
    int this_rd;
    unsigned int sse;
    BLOCK *b = &x->block[0];

    x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;

    pick_intra_mbuv_mode(x);

    for (mode = DC_PRED; mode <= TM_PRED; mode ++)
    {
        x->e_mbd.mode_info_context->mbmi.mode = mode;
        RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
            (&x->e_mbd);
        distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
            (*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
        rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
        this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

        if (error16x16 > this_rd)
        {
            error16x16 = this_rd;
            best_mode = mode;
            best_sse = sse;
            best_rate = rate;
        }
    }
    x->e_mbd.mode_info_context->mbmi.mode = best_mode;

    error4x4 = pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate,
                                      &best_sse);
    if (error4x4 < error16x16)
    {
        x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
        best_rate = rate;
    }

    *rate_ = best_rate;
}